diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE
new file mode 100644
index 0000000..2f9a31f
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Alex Saskevich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md
new file mode 100644
index 0000000..0053e25
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/README.md
@@ -0,0 +1,402 @@
+govalidator
+===========
+[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043)
+[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator)
+
+A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
+
+#### Installation
+Make sure that Go is installed on your computer.
+Type the following command in your terminal:
+
+ go get github.com/asaskevich/govalidator
+
+or you can get a specific release of the package via `gopkg.in`:
+
+ go get gopkg.in/asaskevich/govalidator.v4
+
+After that, the package is ready to use.
+
+
+#### Import package in your project
+Add the following line to your `*.go` file:
+```go
+import "github.com/asaskevich/govalidator"
+```
+If you would rather not type out the long `govalidator` name, you can alias the import:
+```go
+import (
+ valid "github.com/asaskevich/govalidator"
+)
+```
+
+#### Activate behavior to require all fields to have a validation tag by default
+`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
+
+```go
+import "github.com/asaskevich/govalidator"
+
+func init() {
+ govalidator.SetFieldsRequiredByDefault(true)
+}
+```
+
+Here's some code to explain it:
+```go
+// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+type exampleStruct struct {
+ Name string ``
+ Email string `valid:"email"`
+}
+
+// this, however, will only fail when Email is empty or an invalid email address:
+type exampleStruct2 struct {
+ Name string `valid:"-"`
+ Email string `valid:"email"`
+}
+
+// lastly, this will only fail when Email is an invalid email address but not when it's empty:
+type exampleStruct3 struct {
+ Name string `valid:"-"`
+ Email string `valid:"email,optional"`
+}
+```
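+
+For illustration, here is a minimal sketch of what validating the first struct above would look like (assuming `SetFieldsRequiredByDefault(true)` from the `init` function is active):
+```go
+s := exampleStruct{Name: "John", Email: "john@example.com"}
+result, err := govalidator.ValidateStruct(s)
+if err != nil {
+	// expected here: Name has no validation tag and is not marked exempt
+	println("error: " + err.Error())
+}
+println(result) // false
+```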
+
+#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
+##### Custom validator function signature
+A context was added as the second parameter; for structs, this is the object being validated. This makes dependent validation possible.
+```go
+import "github.com/asaskevich/govalidator"
+
+// old signature
+func(i interface{}) bool
+
+// new signature
+func(i interface{}, o interface{}) bool
+```
+
+##### Adding a custom validator
+This was changed to prevent data races when accessing custom validators.
+```go
+import "github.com/asaskevich/govalidator"
+
+// before
+govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool {
+ // ...
+})
+
+// after
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool {
+ // ...
+}))
+```
+
+#### List of functions:
+```go
+func Abs(value float64) float64
+func BlackList(str, chars string) string
+func ByteLength(str string, params ...string) bool
+func StringLength(str string, params ...string) bool
+func StringMatches(s string, params ...string) bool
+func CamelCaseToUnderscore(str string) string
+func Contains(str, substring string) bool
+func Count(array []interface{}, iterator ConditionIterator) int
+func Each(array []interface{}, iterator Iterator)
+func ErrorByField(e error, field string) string
+func Filter(array []interface{}, iterator ConditionIterator) []interface{}
+func Find(array []interface{}, iterator ConditionIterator) interface{}
+func GetLine(s string, index int) (string, error)
+func GetLines(s string) []string
+func IsHost(s string) bool
+func InRange(value, left, right float64) bool
+func IsASCII(str string) bool
+func IsAlpha(str string) bool
+func IsAlphanumeric(str string) bool
+func IsBase64(str string) bool
+func IsByteLength(str string, min, max int) bool
+func IsCreditCard(str string) bool
+func IsDataURI(str string) bool
+func IsDialString(str string) bool
+func IsDNSName(str string) bool
+func IsDivisibleBy(str, num string) bool
+func IsEmail(str string) bool
+func IsFilePath(str string) (bool, int)
+func IsFloat(str string) bool
+func IsFullWidth(str string) bool
+func IsHalfWidth(str string) bool
+func IsHexadecimal(str string) bool
+func IsHexcolor(str string) bool
+func IsIP(str string) bool
+func IsIPv4(str string) bool
+func IsIPv6(str string) bool
+func IsISBN(str string, version int) bool
+func IsISBN10(str string) bool
+func IsISBN13(str string) bool
+func IsISO3166Alpha2(str string) bool
+func IsISO3166Alpha3(str string) bool
+func IsInt(str string) bool
+func IsJSON(str string) bool
+func IsLatitude(str string) bool
+func IsLongitude(str string) bool
+func IsLowerCase(str string) bool
+func IsMAC(str string) bool
+func IsMongoID(str string) bool
+func IsMultibyte(str string) bool
+func IsNatural(value float64) bool
+func IsNegative(value float64) bool
+func IsNonNegative(value float64) bool
+func IsNonPositive(value float64) bool
+func IsNull(str string) bool
+func IsNumeric(str string) bool
+func IsPort(str string) bool
+func IsPositive(value float64) bool
+func IsPrintableASCII(str string) bool
+func IsRGBcolor(str string) bool
+func IsRequestURI(rawurl string) bool
+func IsRequestURL(rawurl string) bool
+func IsSSN(str string) bool
+func IsSemver(str string) bool
+func IsURL(str string) bool
+func IsUTFDigit(str string) bool
+func IsUTFLetter(str string) bool
+func IsUTFLetterNumeric(str string) bool
+func IsUTFNumeric(str string) bool
+func IsUUID(str string) bool
+func IsUUIDv3(str string) bool
+func IsUUIDv4(str string) bool
+func IsUUIDv5(str string) bool
+func IsUpperCase(str string) bool
+func IsVariableWidth(str string) bool
+func IsWhole(value float64) bool
+func LeftTrim(str, chars string) string
+func Map(array []interface{}, iterator ResultIterator) []interface{}
+func Matches(str, pattern string) bool
+func NormalizeEmail(str string) (string, error)
+func RemoveTags(s string) string
+func ReplacePattern(str, pattern, replace string) string
+func Reverse(s string) string
+func RightTrim(str, chars string) string
+func SafeFileName(str string) string
+func Sign(value float64) float64
+func StripLow(str string, keepNewLines bool) string
+func ToBoolean(str string) (bool, error)
+func ToFloat(str string) (float64, error)
+func ToInt(str string) (int64, error)
+func ToJSON(obj interface{}) (string, error)
+func ToString(obj interface{}) string
+func Trim(str, chars string) string
+func Truncate(str string, length int, ending string) string
+func UnderscoreToCamelCase(s string) string
+func ValidateStruct(s interface{}) (bool, error)
+func WhiteList(str, chars string) string
+type ConditionIterator
+type Error
+func (e Error) Error() string
+type Errors
+func (es Errors) Error() string
+type ISO3166Entry
+type Iterator
+type ParamValidator
+type ResultIterator
+type UnsupportedTypeError
+func (e *UnsupportedTypeError) Error() string
+type Validator
+```
+
+#### Examples
+###### IsURL
+```go
+println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
+```
+###### ToString
+```go
+type User struct {
+ FirstName string
+ LastName string
+}
+
+str := govalidator.ToString(&User{"John", "Juan"})
+println(str)
+```
+###### Each, Map, Filter, Count for slices
+Each iterates over the slice/array and calls the Iterator for every item:
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.Iterator = func(value interface{}, index int) {
+ println(value.(int))
+}
+govalidator.Each(data, fn)
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
+ return value.(int) * 3
+}
+_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15}
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
+ return value.(int)%2 == 0
+}
+_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
+_ = govalidator.Count(data, fn) // result = 5
+```
+###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
+If you want to validate structs, you can use the `valid` tag on any field in your structure. Multiple validators applied to one field are separated by commas inside the tag. If you want to skip validation, place `-` in the tag. If you need a validator that is not on the list below, you can add it like this:
+```go
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+ return str == "duck"
+})
+```
+For completely custom validators (interface-based), see below.
+
+Here is a list of available validators for struct fields (validator tag - function used):
+```go
+"alpha": IsAlpha,
+"alphanum": IsAlphanumeric,
+"ascii": IsASCII,
+"base64": IsBase64,
+"creditcard": IsCreditCard,
+"datauri": IsDataURI,
+"dialstring": IsDialString,
+"dns": IsDNSName,
+"email": IsEmail,
+"float": IsFloat,
+"fullwidth": IsFullWidth,
+"halfwidth": IsHalfWidth,
+"hexadecimal": IsHexadecimal,
+"hexcolor": IsHexcolor,
+"host": IsHost,
+"int": IsInt,
+"ip": IsIP,
+"ipv4": IsIPv4,
+"ipv6": IsIPv6,
+"isbn10": IsISBN10,
+"isbn13": IsISBN13,
+"json": IsJSON,
+"latitude": IsLatitude,
+"longitude": IsLongitude,
+"lowercase": IsLowerCase,
+"mac": IsMAC,
+"multibyte": IsMultibyte,
+"null": IsNull,
+"numeric": IsNumeric,
+"port": IsPort,
+"printableascii": IsPrintableASCII,
+"requri": IsRequestURI,
+"requrl": IsRequestURL,
+"rgbcolor": IsRGBcolor,
+"ssn": IsSSN,
+"semver": IsSemver,
+"uppercase": IsUpperCase,
+"url": IsURL,
+"utfdigit": IsUTFDigit,
+"utfletter": IsUTFLetter,
+"utfletternum": IsUTFLetterNumeric,
+"utfnumeric": IsUTFNumeric,
+"uuid": IsUUID,
+"uuidv3": IsUUIDv3,
+"uuidv4": IsUUIDv4,
+"uuidv5": IsUUIDv5,
+"variablewidth": IsVariableWidth,
+```
+Validators with parameters
+
+```go
+"length(min|max)": ByteLength,
+"runelength(min|max)": RuneLegth,
+"matches(pattern)": StringMatches,
+```
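+
+A minimal usage sketch for these parameterized tags (the struct and field names here are only illustrative):
+```go
+type Comment struct {
+	Body string `valid:"length(10|140)"`        // byte length must be between 10 and 140
+	Slug string `valid:"matches(^[a-z0-9-]+$)"` // must match the regular expression
+}
+```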
+
+And here is a small example of usage:
+```go
+type Post struct {
+ Title string `valid:"alphanum,required"`
+ Message string `valid:"duck,ascii"`
+ AuthorIP string `valid:"ipv4"`
+ Date string `valid:"-"`
+}
+post := &Post{
+ Title: "My Example Post",
+ Message: "duck",
+ AuthorIP: "123.234.54.3",
+}
+
+// Add your own struct validation tags
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+ return str == "duck"
+})
+
+result, err := govalidator.ValidateStruct(post)
+if err != nil {
+ println("error: " + err.Error())
+}
+println(result)
+```
+###### WhiteList
+```go
+// Remove all characters from the string except those between "a" and "z"
+println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
+```
+
+###### Custom validation functions
+Custom validation using your own domain-specific validators is also available - here's an example of how to use it:
+```go
+import "github.com/asaskevich/govalidator"
+
+type CustomByteArray [6]byte // custom types are supported and can be validated
+
+type StructWithCustomByteArray struct {
+ ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
+ Email string `valid:"email"`
+ CustomMinLength int `valid:"-"`
+}
+
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
+ switch v := context.(type) { // you can type switch on the context interface being validated
+ case StructWithCustomByteArray:
+ // you can check and validate against some other field in the context,
+ // return early or not validate against the context at all – your choice
+ case SomeOtherType:
+ // ...
+ default:
+ // expecting some other type? Throw/panic here or continue
+ }
+
+ switch v := i.(type) { // type switch on the struct field being validated
+ case CustomByteArray:
+ for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
+ if e != 0 {
+ return true
+ }
+ }
+ }
+ return false
+}))
+govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
+ switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
+ case StructWithCustomByteArray:
+ return len(v.ID) >= v.CustomMinLength
+ }
+ return false
+}))
+```
+
+#### Notes
+Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
+Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
+
+#### Support
+If you have a contribution for the package, feel free to put up a Pull Request or open an Issue.
+
+#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
+* [Daniel Lohse](https://github.com/annismckenzie)
+* [Attila Oláh](https://github.com/attilaolah)
+* [Daniel Korner](https://github.com/Dadie)
+* [Steven Wilkin](https://github.com/stevenwilkin)
+* [Deiwin Sarjas](https://github.com/deiwin)
+* [Noah Shibley](https://github.com/slugmobile)
+* [Nathan Davies](https://github.com/nathj07)
+* [Matt Sanford](https://github.com/mzsanford)
+* [Simon ccl1115](https://github.com/ccl1115)
diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go
new file mode 100644
index 0000000..5bace26
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/arrays.go
@@ -0,0 +1,58 @@
+package govalidator
+
+// Iterator is a function that accepts an element of a slice/array and its index.
+type Iterator func(interface{}, int)
+
+// ResultIterator is a function that accepts an element of a slice/array and its index and returns any result.
+type ResultIterator func(interface{}, int) interface{}
+
+// ConditionIterator is a function that accepts an element of a slice/array and its index and returns a boolean.
+type ConditionIterator func(interface{}, int) bool
+
+// Each iterates over the slice and applies the Iterator to every item.
+func Each(array []interface{}, iterator Iterator) {
+ for index, data := range array {
+ iterator(data, index)
+ }
+}
+
+// Map iterates over the slice and applies the ResultIterator to every item. Returns a new slice as a result.
+func Map(array []interface{}, iterator ResultIterator) []interface{} {
+ var result = make([]interface{}, len(array))
+ for index, data := range array {
+ result[index] = iterator(data, index)
+ }
+ return result
+}
+
+// Find iterates over the slice and applies the ConditionIterator to every item. Returns the first item that meets the ConditionIterator, or nil otherwise.
+func Find(array []interface{}, iterator ConditionIterator) interface{} {
+ for index, data := range array {
+ if iterator(data, index) {
+ return data
+ }
+ }
+ return nil
+}
+
+// Filter iterates over the slice and applies the ConditionIterator to every item. Returns a new slice with the matching items.
+func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
+ var result = make([]interface{}, 0)
+ for index, data := range array {
+ if iterator(data, index) {
+ result = append(result, data)
+ }
+ }
+ return result
+}
+
+// Count iterates over the slice and applies the ConditionIterator to every item. Returns the count of items that meet the ConditionIterator.
+func Count(array []interface{}, iterator ConditionIterator) int {
+ count := 0
+ for index, data := range array {
+ if iterator(data, index) {
+ count = count + 1
+ }
+ }
+ return count
+}
diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go
new file mode 100644
index 0000000..737a1f1
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/converter.go
@@ -0,0 +1,49 @@
+package govalidator
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+)
+
+// ToString converts the input to a string.
+func ToString(obj interface{}) string {
+ res := fmt.Sprintf("%v", obj)
+ return string(res)
+}
+
+// ToJSON converts the input to a valid JSON string.
+func ToJSON(obj interface{}) (string, error) {
+ res, err := json.Marshal(obj)
+ if err != nil {
+ res = []byte("")
+ }
+ return string(res), err
+}
+
+// ToFloat converts the input string to a float, returning 0.0 if the input is not a float.
+func ToFloat(str string) (float64, error) {
+ res, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ res = 0.0
+ }
+ return res, err
+}
+
+// ToInt converts the input string to an integer, returning 0 if the input is not an integer.
+func ToInt(str string) (int64, error) {
+ res, err := strconv.ParseInt(str, 0, 64)
+ if err != nil {
+ res = 0
+ }
+ return res, err
+}
+
+// ToBoolean converts the input string to a boolean.
+func ToBoolean(str string) (bool, error) {
+ res, err := strconv.ParseBool(str)
+ if err != nil {
+ res = false
+ }
+ return res, err
+}
diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go
new file mode 100644
index 0000000..280b1c4
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/error.go
@@ -0,0 +1,31 @@
+package govalidator
+
+// Errors is an array of multiple errors and conforms to the error interface.
+type Errors []error
+
+// Errors returns itself.
+func (es Errors) Errors() []error {
+ return es
+}
+
+func (es Errors) Error() string {
+ var err string
+ for _, e := range es {
+ err += e.Error() + ";"
+ }
+ return err
+}
+
+// Error encapsulates a name, an error and whether there's a custom error message or not.
+type Error struct {
+ Name string
+ Err error
+ CustomErrorMessageExists bool
+}
+
+func (e Error) Error() string {
+ if e.CustomErrorMessageExists {
+ return e.Err.Error()
+ }
+ return e.Name + ": " + e.Err.Error()
+}
diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go
new file mode 100644
index 0000000..737cd47
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/numerics.go
@@ -0,0 +1,57 @@
+package govalidator
+
+import "math"
+
+// Abs returns the absolute value of the number.
+func Abs(value float64) float64 {
+ return value * Sign(value)
+}
+
+// Sign returns the signum of the number: 1 if value > 0, -1 if value < 0, 0 otherwise.
+func Sign(value float64) float64 {
+ if value > 0 {
+ return 1
+ } else if value < 0 {
+ return -1
+ } else {
+ return 0
+ }
+}
+
+// IsNegative returns true if value < 0
+func IsNegative(value float64) bool {
+ return value < 0
+}
+
+// IsPositive returns true if value > 0
+func IsPositive(value float64) bool {
+ return value > 0
+}
+
+// IsNonNegative returns true if value >= 0
+func IsNonNegative(value float64) bool {
+ return value >= 0
+}
+
+// IsNonPositive returns true if value <= 0
+func IsNonPositive(value float64) bool {
+ return value <= 0
+}
+
+// InRange returns true if value lies between the left and right borders (in either order), inclusive.
+func InRange(value, left, right float64) bool {
+ if left > right {
+ left, right = right, left
+ }
+ return value >= left && value <= right
+}
+
+// IsWhole returns true if value is a whole number.
+func IsWhole(value float64) bool {
+ return Abs(math.Remainder(value, 1)) == 0
+}
+
+// IsNatural returns true if value is a natural number (positive and whole).
+func IsNatural(value float64) bool {
+ return IsWhole(value) && IsPositive(value)
+}
diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go
new file mode 100644
index 0000000..b88cc95
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/patterns.go
@@ -0,0 +1,91 @@
+package govalidator
+
+import "regexp"
+
+// Basic regular expressions for validating strings
+const (
+ Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
+ CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$"
+ ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
+ ISBN13 string = "^(?:[0-9]{13})$"
+ UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ Alpha string = "^[a-zA-Z]+$"
+ Alphanumeric string = "^[a-zA-Z0-9]+$"
+ Numeric string = "^[-+]?[0-9]+$"
+ Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
+ Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
+ Hexadecimal string = "^[0-9a-fA-F]+$"
+ Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
+ ASCII string = "^[\x00-\x7F]+$"
+ Multibyte string = "[^\x00-\x7F]"
+ FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
+ HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
+ Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
+ PrintableASCII string = "^[\x20-\x7E]+$"
+ DataURI string = "^data:.+\\/(.+);base64$"
+ Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
+ Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
+ DNSName string = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9]{1}[a-zA-Z0-9_-]{1,62})*$`
+ IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
+ URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
+ URLUsername string = `(\S+(:\S*)?@)`
+ Hostname string = ``
+ URLPath string = `((\/|\?|#)[^\s]*)`
+ URLPort string = `(:(\d{1,5}))`
+ URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))`
+ URLSubdomain string = `((www\.)|([a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*))`
+ URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))` + URLPort + `?` + URLPath + `?$`
+ SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
+ WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
+ UnixPath string = `^(/[^/\x00]*)+/?$`
+ Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
+ tagName string = "valid"
+)
+
+// Used by IsFilePath func
+const (
+ // Unknown is unresolved OS type
+ Unknown = iota
+ // Win is Windows type
+ Win
+ // Unix is *nix OS types
+ Unix
+)
+
+var (
+ rxEmail = regexp.MustCompile(Email)
+ rxCreditCard = regexp.MustCompile(CreditCard)
+ rxISBN10 = regexp.MustCompile(ISBN10)
+ rxISBN13 = regexp.MustCompile(ISBN13)
+ rxUUID3 = regexp.MustCompile(UUID3)
+ rxUUID4 = regexp.MustCompile(UUID4)
+ rxUUID5 = regexp.MustCompile(UUID5)
+ rxUUID = regexp.MustCompile(UUID)
+ rxAlpha = regexp.MustCompile(Alpha)
+ rxAlphanumeric = regexp.MustCompile(Alphanumeric)
+ rxNumeric = regexp.MustCompile(Numeric)
+ rxInt = regexp.MustCompile(Int)
+ rxFloat = regexp.MustCompile(Float)
+ rxHexadecimal = regexp.MustCompile(Hexadecimal)
+ rxHexcolor = regexp.MustCompile(Hexcolor)
+ rxRGBcolor = regexp.MustCompile(RGBcolor)
+ rxASCII = regexp.MustCompile(ASCII)
+ rxPrintableASCII = regexp.MustCompile(PrintableASCII)
+ rxMultibyte = regexp.MustCompile(Multibyte)
+ rxFullWidth = regexp.MustCompile(FullWidth)
+ rxHalfWidth = regexp.MustCompile(HalfWidth)
+ rxBase64 = regexp.MustCompile(Base64)
+ rxDataURI = regexp.MustCompile(DataURI)
+ rxLatitude = regexp.MustCompile(Latitude)
+ rxLongitude = regexp.MustCompile(Longitude)
+ rxDNSName = regexp.MustCompile(DNSName)
+ rxURL = regexp.MustCompile(URL)
+ rxSSN = regexp.MustCompile(SSN)
+ rxWinPath = regexp.MustCompile(WinPath)
+ rxUnixPath = regexp.MustCompile(UnixPath)
+ rxSemver = regexp.MustCompile(Semver)
+)
diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go
new file mode 100644
index 0000000..c5f1f21
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/types.go
@@ -0,0 +1,381 @@
+package govalidator
+
+import (
+ "reflect"
+ "regexp"
+ "sync"
+)
+
+// Validator is a wrapper for a validator function that returns bool and accepts string.
+type Validator func(str string) bool
+
+// CustomTypeValidator is a wrapper for validator functions that return bool and accept any type.
+// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
+type CustomTypeValidator func(i interface{}, o interface{}) bool
+
+// ParamValidator is a wrapper for validator functions that accept additional parameters.
+type ParamValidator func(str string, params ...string) bool
+type tagOptionsMap map[string]string
+
+// UnsupportedTypeError is a wrapper for reflect.Type
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+// stringValues is a slice of reflect.Value holding *reflect.StringValue.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+// ParamTagMap is a map of validator functions that accept variant parameters.
+var ParamTagMap = map[string]ParamValidator{
+ "length": ByteLength,
+ "runelength": RuneLength,
+ "stringlength": StringLength,
+ "matches": StringMatches,
+}
+
+// ParamTagRegexMap maps param tags to their respective regexes.
+var ParamTagRegexMap = map[string]*regexp.Regexp{
+ "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
+ "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
+ "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
+ "matches": regexp.MustCompile(`^matches\((.+)\)$`),
+}
+
+type customTypeTagMap struct {
+ validators map[string]CustomTypeValidator
+
+ sync.RWMutex
+}
+
+func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
+ tm.RLock()
+ defer tm.RUnlock()
+ v, ok := tm.validators[name]
+ return v, ok
+}
+
+func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
+ tm.Lock()
+ defer tm.Unlock()
+ tm.validators[name] = ctv
+}
+
+// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
+// Use this to validate compound or custom types that need to be handled as a whole, e.g.
+// `type UUID [16]byte` (this would be handled as an array of bytes).
+var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
+
+// TagMap is a map of functions that can be used as tags for the ValidateStruct function.
+var TagMap = map[string]Validator{
+ "email": IsEmail,
+ "url": IsURL,
+ "dialstring": IsDialString,
+ "requrl": IsRequestURL,
+ "requri": IsRequestURI,
+ "alpha": IsAlpha,
+ "utfletter": IsUTFLetter,
+ "alphanum": IsAlphanumeric,
+ "utfletternum": IsUTFLetterNumeric,
+ "numeric": IsNumeric,
+ "utfnumeric": IsUTFNumeric,
+ "utfdigit": IsUTFDigit,
+ "hexadecimal": IsHexadecimal,
+ "hexcolor": IsHexcolor,
+ "rgbcolor": IsRGBcolor,
+ "lowercase": IsLowerCase,
+ "uppercase": IsUpperCase,
+ "int": IsInt,
+ "float": IsFloat,
+ "null": IsNull,
+ "uuid": IsUUID,
+ "uuidv3": IsUUIDv3,
+ "uuidv4": IsUUIDv4,
+ "uuidv5": IsUUIDv5,
+ "creditcard": IsCreditCard,
+ "isbn10": IsISBN10,
+ "isbn13": IsISBN13,
+ "json": IsJSON,
+ "multibyte": IsMultibyte,
+ "ascii": IsASCII,
+ "printableascii": IsPrintableASCII,
+ "fullwidth": IsFullWidth,
+ "halfwidth": IsHalfWidth,
+ "variablewidth": IsVariableWidth,
+ "base64": IsBase64,
+ "datauri": IsDataURI,
+ "ip": IsIP,
+ "port": IsPort,
+ "ipv4": IsIPv4,
+ "ipv6": IsIPv6,
+ "dns": IsDNSName,
+ "host": IsHost,
+ "mac": IsMAC,
+ "latitude": IsLatitude,
+ "longitude": IsLongitude,
+ "ssn": IsSSN,
+ "semver": IsSemver,
+ "rfc3339": IsRFC3339,
+}
+
+// ISO3166Entry stores country codes
+type ISO3166Entry struct {
+ EnglishShortName string
+ FrenchShortName string
+ Alpha2Code string
+ Alpha3Code string
+ Numeric string
+}
+
+// ISO3166List is based on https://www.iso.org/obp/ui/#search/code/ with Code Type "Officially Assigned Codes".
+var ISO3166List = []ISO3166Entry{
+ {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
+ {"Albania", "Albanie (l')", "AL", "ALB", "008"},
+ {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
+ {"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
+ {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
+ {"Andorra", "Andorre (l')", "AD", "AND", "020"},
+ {"Angola", "Angola (l')", "AO", "AGO", "024"},
+ {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
+ {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
+ {"Argentina", "Argentine (l')", "AR", "ARG", "032"},
+ {"Australia", "Australie (l')", "AU", "AUS", "036"},
+ {"Austria", "Autriche (l')", "AT", "AUT", "040"},
+ {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
+ {"Bahrain", "Bahreïn", "BH", "BHR", "048"},
+ {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
+ {"Armenia", "Arménie (l')", "AM", "ARM", "051"},
+ {"Barbados", "Barbade (la)", "BB", "BRB", "052"},
+ {"Belgium", "Belgique (la)", "BE", "BEL", "056"},
+ {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
+ {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
+ {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
+ {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
+ {"Botswana", "Botswana (le)", "BW", "BWA", "072"},
+ {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
+ {"Brazil", "Brésil (le)", "BR", "BRA", "076"},
+ {"Belize", "Belize (le)", "BZ", "BLZ", "084"},
+ {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
+ {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
+ {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
+ {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
+ {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
+ {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
+ {"Burundi", "Burundi (le)", "BI", "BDI", "108"},
+ {"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
+ {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
+ {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
+ {"Canada", "Canada (le)", "CA", "CAN", "124"},
+ {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
+ {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
+ {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
+ {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
+ {"Chad", "Tchad (le)", "TD", "TCD", "148"},
+ {"Chile", "Chili (le)", "CL", "CHL", "152"},
+ {"China", "Chine (la)", "CN", "CHN", "156"},
+ {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
+ {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
+ {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
+ {"Colombia", "Colombie (la)", "CO", "COL", "170"},
+ {"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
+ {"Mayotte", "Mayotte", "YT", "MYT", "175"},
+ {"Congo (the)", "Congo (le)", "CG", "COG", "178"},
+ {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
+ {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
+ {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
+ {"Croatia", "Croatie (la)", "HR", "HRV", "191"},
+ {"Cuba", "Cuba", "CU", "CUB", "192"},
+ {"Cyprus", "Chypre", "CY", "CYP", "196"},
+ {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
+ {"Benin", "Bénin (le)", "BJ", "BEN", "204"},
+ {"Denmark", "Danemark (le)", "DK", "DNK", "208"},
+ {"Dominica", "Dominique (la)", "DM", "DMA", "212"},
+ {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
+ {"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
+ {"El Salvador", "El Salvador", "SV", "SLV", "222"},
+ {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
+ {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
+ {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
+ {"Estonia", "Estonie (l')", "EE", "EST", "233"},
+ {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
+ {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
+ {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
+ {"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
+ {"Finland", "Finlande (la)", "FI", "FIN", "246"},
+ {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
+ {"France", "France (la)", "FR", "FRA", "250"},
+ {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
+ {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
+ {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
+ {"Djibouti", "Djibouti", "DJ", "DJI", "262"},
+ {"Gabon", "Gabon (le)", "GA", "GAB", "266"},
+ {"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
+ {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
+ {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
+ {"Germany", "Allemagne (l')", "DE", "DEU", "276"},
+ {"Ghana", "Ghana (le)", "GH", "GHA", "288"},
+ {"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
+ {"Kiribati", "Kiribati", "KI", "KIR", "296"},
+ {"Greece", "Grèce (la)", "GR", "GRC", "300"},
+ {"Greenland", "Groenland (le)", "GL", "GRL", "304"},
+ {"Grenada", "Grenade (la)", "GD", "GRD", "308"},
+ {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
+ {"Guam", "Guam", "GU", "GUM", "316"},
+ {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
+ {"Guinea", "Guinée (la)", "GN", "GIN", "324"},
+ {"Guyana", "Guyana (le)", "GY", "GUY", "328"},
+ {"Haiti", "Haïti", "HT", "HTI", "332"},
+ {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
+ {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
+ {"Honduras", "Honduras (le)", "HN", "HND", "340"},
+ {"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
+ {"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
+ {"Iceland", "Islande (l')", "IS", "ISL", "352"},
+ {"India", "Inde (l')", "IN", "IND", "356"},
+ {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
+ {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
+ {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
+ {"Ireland", "Irlande (l')", "IE", "IRL", "372"},
+ {"Israel", "Israël", "IL", "ISR", "376"},
+ {"Italy", "Italie (l')", "IT", "ITA", "380"},
+ {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
+ {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
+ {"Japan", "Japon (le)", "JP", "JPN", "392"},
+ {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
+ {"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
+ {"Kenya", "Kenya (le)", "KE", "KEN", "404"},
+ {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
+ {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
+ {"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
+ {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
+ {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
+ {"Lebanon", "Liban (le)", "LB", "LBN", "422"},
+ {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
+ {"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
+ {"Liberia", "Libéria (le)", "LR", "LBR", "430"},
+ {"Libya", "Libye (la)", "LY", "LBY", "434"},
+ {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
+ {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
+ {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
+ {"Macao", "Macao", "MO", "MAC", "446"},
+ {"Madagascar", "Madagascar", "MG", "MDG", "450"},
+ {"Malawi", "Malawi (le)", "MW", "MWI", "454"},
+ {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
+ {"Maldives", "Maldives (les)", "MV", "MDV", "462"},
+ {"Mali", "Mali (le)", "ML", "MLI", "466"},
+ {"Malta", "Malte", "MT", "MLT", "470"},
+ {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
+ {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
+ {"Mauritius", "Maurice", "MU", "MUS", "480"},
+ {"Mexico", "Mexique (le)", "MX", "MEX", "484"},
+ {"Monaco", "Monaco", "MC", "MCO", "492"},
+ {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
+ {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
+ {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
+ {"Montserrat", "Montserrat", "MS", "MSR", "500"},
+ {"Morocco", "Maroc (le)", "MA", "MAR", "504"},
+ {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
+ {"Oman", "Oman", "OM", "OMN", "512"},
+ {"Namibia", "Namibie (la)", "NA", "NAM", "516"},
+ {"Nauru", "Nauru", "NR", "NRU", "520"},
+ {"Nepal", "Népal (le)", "NP", "NPL", "524"},
+ {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
+ {"Curaçao", "Curaçao", "CW", "CUW", "531"},
+ {"Aruba", "Aruba", "AW", "ABW", "533"},
+ {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
+ {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
+ {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
+ {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
+ {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
+ {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
+ {"Niger (the)", "Niger (le)", "NE", "NER", "562"},
+ {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
+ {"Niue", "Niue", "NU", "NIU", "570"},
+ {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
+ {"Norway", "Norvège (la)", "NO", "NOR", "578"},
+ {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
+ {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
+ {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
+ {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
+ {"Palau", "Palaos (les)", "PW", "PLW", "585"},
+ {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
+ {"Panama", "Panama (le)", "PA", "PAN", "591"},
+ {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
+ {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
+ {"Peru", "Pérou (le)", "PE", "PER", "604"},
+ {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
+ {"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
+ {"Poland", "Pologne (la)", "PL", "POL", "616"},
+ {"Portugal", "Portugal (le)", "PT", "PRT", "620"},
+ {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
+ {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
+ {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
+ {"Qatar", "Qatar (le)", "QA", "QAT", "634"},
+ {"Réunion", "Réunion (La)", "RE", "REU", "638"},
+ {"Romania", "Roumanie (la)", "RO", "ROU", "642"},
+ {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
+ {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
+ {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
+ {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
+ {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
+ {"Anguilla", "Anguilla", "AI", "AIA", "660"},
+ {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
+ {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
+ {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
+ {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
+ {"San Marino", "Saint-Marin", "SM", "SMR", "674"},
+ {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
+ {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
+ {"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
+ {"Serbia", "Serbie (la)", "RS", "SRB", "688"},
+ {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
+ {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
+ {"Singapore", "Singapour", "SG", "SGP", "702"},
+ {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
+ {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
+ {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
+ {"Somalia", "Somalie (la)", "SO", "SOM", "706"},
+ {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
+ {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
+ {"Spain", "Espagne (l')", "ES", "ESP", "724"},
+ {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
+ {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
+ {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
+ {"Suriname", "Suriname (le)", "SR", "SUR", "740"},
+ {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
+ {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
+ {"Sweden", "Suède (la)", "SE", "SWE", "752"},
+ {"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
+ {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
+ {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
+ {"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
+ {"Togo", "Togo (le)", "TG", "TGO", "768"},
+ {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
+ {"Tonga", "Tonga (les)", "TO", "TON", "776"},
+ {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
+ {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
+ {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
+ {"Turkey", "Turquie (la)", "TR", "TUR", "792"},
+ {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
+ {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
+ {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
+ {"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
+ {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
+ {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
+ {"Egypt", "Égypte (l')", "EG", "EGY", "818"},
+ {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
+ {"Guernsey", "Guernesey", "GG", "GGY", "831"},
+ {"Jersey", "Jersey", "JE", "JEY", "832"},
+ {"Isle of Man", "Île de Man", "IM", "IMN", "833"},
+ {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
+ {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
+ {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
+ {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
+ {"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
+ {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
+ {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
+ {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
+ {"Samoa", "Samoa (le)", "WS", "WSM", "882"},
+ {"Yemen", "Yémen (le)", "YE", "YEM", "887"},
+ {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
+}
diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go
new file mode 100644
index 0000000..200b913
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/utils.go
@@ -0,0 +1,213 @@
+package govalidator
+
+import (
+ "errors"
+ "fmt"
+ "html"
+ "path"
+ "regexp"
+ "strings"
+ "unicode"
+)
+
+// Contains checks if the string contains the substring.
+func Contains(str, substring string) bool {
+ return strings.Contains(str, substring)
+}
+
+// Matches checks if the string matches the pattern (the pattern is a regular expression).
+// In case of error it returns false.
+func Matches(str, pattern string) bool {
+ match, _ := regexp.MatchString(pattern, str)
+ return match
+}
+
+// LeftTrim trims characters from the left side of the input.
+// If the second argument is empty, it removes leading spaces.
+func LeftTrim(str, chars string) string {
+ pattern := ""
+ if chars == "" {
+ pattern = "^\\s+"
+ } else {
+ pattern = "^[" + chars + "]+"
+ }
+ r, _ := regexp.Compile(pattern)
+ return string(r.ReplaceAll([]byte(str), []byte("")))
+}
+
+// RightTrim trims characters from the right side of the input.
+// If the second argument is empty, it removes trailing spaces.
+func RightTrim(str, chars string) string {
+ pattern := ""
+ if chars == "" {
+ pattern = "\\s+$"
+ } else {
+ pattern = "[" + chars + "]+$"
+ }
+ r, _ := regexp.Compile(pattern)
+ return string(r.ReplaceAll([]byte(str), []byte("")))
+}
+
+// Trim trims characters from both sides of the input.
+// If the second argument is empty, it removes surrounding spaces.
+func Trim(str, chars string) string {
+ return LeftTrim(RightTrim(str, chars), chars)
+}
+
+// WhiteList removes characters that do not appear in the whitelist.
+func WhiteList(str, chars string) string {
+ pattern := "[^" + chars + "]+"
+ r, _ := regexp.Compile(pattern)
+ return string(r.ReplaceAll([]byte(str), []byte("")))
+}
+
+// BlackList removes characters that appear in the blacklist.
+func BlackList(str, chars string) string {
+ pattern := "[" + chars + "]+"
+ r, _ := regexp.Compile(pattern)
+ return string(r.ReplaceAll([]byte(str), []byte("")))
+}
+
+// StripLow removes characters with a numerical value < 32 or equal to 127, mostly control characters.
+// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
+func StripLow(str string, keepNewLines bool) string {
+ chars := ""
+ if keepNewLines {
+ chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
+ } else {
+ chars = "\x00-\x1F\x7F"
+ }
+ return BlackList(str, chars)
+}
+
+// ReplacePattern replaces occurrences of the regular expression pattern in the string with the replacement.
+func ReplacePattern(str, pattern, replace string) string {
+ r, _ := regexp.Compile(pattern)
+ return string(r.ReplaceAll([]byte(str), []byte(replace)))
+}
+
+// Escape replaces <, >, & and " with HTML entities.
+var Escape = html.EscapeString
+
+func addSegment(inrune, segment []rune) []rune {
+ if len(segment) == 0 {
+ return inrune
+ }
+ if len(inrune) != 0 {
+ inrune = append(inrune, '_')
+ }
+ inrune = append(inrune, segment...)
+ return inrune
+}
+
+// UnderscoreToCamelCase converts from underscore separated form to camel case form.
+// Ex.: my_func => MyFunc
+func UnderscoreToCamelCase(s string) string {
+ return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
+}
+
+// CamelCaseToUnderscore converts from camel case form to underscore separated form.
+// Ex.: MyFunc => my_func
+func CamelCaseToUnderscore(str string) string {
+ var output []rune
+ var segment []rune
+ for _, r := range str {
+ if !unicode.IsLower(r) {
+ output = addSegment(output, segment)
+ segment = nil
+ }
+ segment = append(segment, unicode.ToLower(r))
+ }
+ output = addSegment(output, segment)
+ return string(output)
+}
+
+// Reverse returns the reversed string.
+func Reverse(s string) string {
+ r := []rune(s)
+ for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
+ r[i], r[j] = r[j], r[i]
+ }
+ return string(r)
+}
+
+// GetLines splits the string by "\n" and returns an array of lines.
+func GetLines(s string) []string {
+ return strings.Split(s, "\n")
+}
+
+// GetLine returns the specified line of a multiline string.
+func GetLine(s string, index int) (string, error) {
+ lines := GetLines(s)
+ if index < 0 || index >= len(lines) {
+ return "", errors.New("line index out of bounds")
+ }
+ return lines[index], nil
+}
+
+// RemoveTags removes all tags from an HTML string.
+func RemoveTags(s string) string {
+ return ReplacePattern(s, "<[^>]*>", "")
+}
+
+// SafeFileName returns a safe string that can be used in file names.
+func SafeFileName(str string) string {
+ name := strings.ToLower(str)
+ name = path.Clean(path.Base(name))
+ name = strings.Trim(name, " ")
+ separators, err := regexp.Compile(`[ &_=+:]`)
+ if err == nil {
+ name = separators.ReplaceAllString(name, "-")
+ }
+ legal, err := regexp.Compile(`[^[:alnum:]-.]`)
+ if err == nil {
+ name = legal.ReplaceAllString(name, "")
+ }
+ for strings.Contains(name, "--") {
+ name = strings.Replace(name, "--", "-", -1)
+ }
+ return name
+}
+
+// NormalizeEmail canonicalizes an email address.
+// The local part and the hostname are always lowercased.
+// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and
+// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are
+// normalized to @gmail.com.
+func NormalizeEmail(str string) (string, error) {
+ if !IsEmail(str) {
+ return "", fmt.Errorf("%s is not an email", str)
+ }
+ parts := strings.Split(str, "@")
+ parts[0] = strings.ToLower(parts[0])
+ parts[1] = strings.ToLower(parts[1])
+ if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
+ parts[1] = "gmail.com"
+ parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
+ }
+ return strings.Join(parts, "@"), nil
+}
+
+// Truncate truncates a string to the closest length without breaking words.
+func Truncate(str string, length int, ending string) string {
+ var aftstr, befstr string
+ if len(str) > length {
+ words := strings.Fields(str)
+ before, present := 0, 0
+ for i := range words {
+ befstr = aftstr
+ before = present
+ aftstr = aftstr + words[i] + " "
+ present = len(aftstr)
+ if present > length && i != 0 {
+ if (length - before) < (present - length) {
+ return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
+ }
+ return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
+ }
+ }
+ }
+
+ return str
+}
diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go
new file mode 100644
index 0000000..030bef0
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/validator.go
@@ -0,0 +1,948 @@
+// Package govalidator is a package of validators and sanitizers for strings, structs and collections.
+package govalidator
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/url"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+var fieldsRequiredByDefault bool
+
+const maxURLRuneCount = 2083
+const minURLRuneCount = 3
+
+// SetFieldsRequiredByDefault causes validation to fail when struct fields
+// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`).
+// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+//     type exampleStruct struct {
+//         Name  string ``
+//         Email string `valid:"email"`
+//     }
+// This, however, will only fail when Email is empty or an invalid email address:
+//     type exampleStruct2 struct {
+//         Name  string `valid:"-"`
+//         Email string `valid:"email"`
+//     }
+// Lastly, this will only fail when Email is an invalid email address but not when it's empty:
+//     type exampleStruct3 struct {
+//         Name  string `valid:"-"`
+//         Email string `valid:"email,optional"`
+//     }
+func SetFieldsRequiredByDefault(value bool) {
+ fieldsRequiredByDefault = value
+}
+
+// IsEmail checks if the string is an email address.
+func IsEmail(str string) bool {
+ // TODO uppercase letters are not supported
+ return rxEmail.MatchString(str)
+}
+
+// IsURL checks if the string is a URL.
+func IsURL(str string) bool {
+ if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
+ return false
+ }
+ u, err := url.Parse(str)
+ if err != nil {
+ return false
+ }
+ if strings.HasPrefix(u.Host, ".") {
+ return false
+ }
+ if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
+ return false
+ }
+ return rxURL.MatchString(str)
+
+}
+
+// IsRequestURL checks if the string rawurl, assuming
+// it was received in an HTTP request, is a valid
+// URL conforming to RFC 3986.
+func IsRequestURL(rawurl string) bool {
+ url, err := url.ParseRequestURI(rawurl)
+ if err != nil {
+ return false //Couldn't even parse the rawurl
+ }
+ if len(url.Scheme) == 0 {
+ return false //No Scheme found
+ }
+ return true
+}
+
+// IsRequestURI checks if the string rawurl, assuming
+// it was received in an HTTP request, is an
+// absolute URI or an absolute path.
+func IsRequestURI(rawurl string) bool {
+ _, err := url.ParseRequestURI(rawurl)
+ return err == nil
+}
+
+// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid.
+func IsAlpha(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxAlpha.MatchString(str)
+}
+
+// IsUTFLetter checks if the string contains only unicode letter characters.
+// Similar to IsAlpha but for all languages. Empty string is valid.
+func IsUTFLetter(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+
+ for _, c := range str {
+ if !unicode.IsLetter(c) {
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsAlphanumeric check if the string contains only letters and numbers. Empty string is valid.
+func IsAlphanumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxAlphanumeric.MatchString(str)
+}
+
+// IsUTFLetterNumeric check if the string contains only unicode letters and numbers. Empty string is valid.
+func IsUTFLetterNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ for _, c := range str {
+ if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsNumeric check if the string contains only numbers. Empty string is valid.
+func IsNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxNumeric.MatchString(str)
+}
+
+// IsUTFNumeric checks if the string contains only unicode numbers of any kind.
+// Numbers can be 0-9 but also fractions ¾, Roman Ⅸ and Hangzhou 〩. Empty string is valid.
+func IsUTFNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ if strings.IndexAny(str, "+-") > 0 {
+ return false
+ }
+ if len(str) > 1 {
+ str = strings.TrimPrefix(str, "-")
+ str = strings.TrimPrefix(str, "+")
+ }
+ for _, c := range str {
+ if !unicode.IsNumber(c) { // numbers && minus sign are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsUTFDigit check if the string contains only unicode radix-10 decimal digits. Empty string is valid.
+func IsUTFDigit(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ if strings.IndexAny(str, "+-") > 0 {
+ return false
+ }
+ if len(str) > 1 {
+ str = strings.TrimPrefix(str, "-")
+ str = strings.TrimPrefix(str, "+")
+ }
+ for _, c := range str {
+ if !unicode.IsDigit(c) { //digits && minus sign are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsHexadecimal check if the string is a hexadecimal number.
+func IsHexadecimal(str string) bool {
+ return rxHexadecimal.MatchString(str)
+}
+
+// IsHexcolor check if the string is a hexadecimal color.
+func IsHexcolor(str string) bool {
+ return rxHexcolor.MatchString(str)
+}
+
+// IsRGBcolor check if the string is a valid RGB color in form rgb(RRR, GGG, BBB).
+func IsRGBcolor(str string) bool {
+ return rxRGBcolor.MatchString(str)
+}
+
+// IsLowerCase check if the string is lowercase. Empty string is valid.
+func IsLowerCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return str == strings.ToLower(str)
+}
+
+// IsUpperCase check if the string is uppercase. Empty string is valid.
+func IsUpperCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return str == strings.ToUpper(str)
+}
+
+// IsInt check if the string is an integer. Empty string is valid.
+func IsInt(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxInt.MatchString(str)
+}
+
+// IsFloat check if the string is a float.
+func IsFloat(str string) bool {
+ return str != "" && rxFloat.MatchString(str)
+}
+
+// IsDivisibleBy checks if the string is a number that's divisible by another.
+// If the second argument is not a valid integer or is zero, it returns false.
+// Otherwise, if the first argument is not a valid integer, it returns true (an invalid string converts to zero).
+func IsDivisibleBy(str, num string) bool {
+ f, _ := ToFloat(str)
+ p := int64(f)
+ q, _ := ToInt(num)
+ if q == 0 {
+ return false
+ }
+ return (p == 0) || (p%q == 0)
+}
+
+// IsNull check if the string is null.
+func IsNull(str string) bool {
+ return len(str) == 0
+}
+
+// IsByteLength check if the string's length (in bytes) falls in a range.
+func IsByteLength(str string, min, max int) bool {
+ return len(str) >= min && len(str) <= max
+}
+
+// IsUUIDv3 check if the string is a UUID version 3.
+func IsUUIDv3(str string) bool {
+ return rxUUID3.MatchString(str)
+}
+
+// IsUUIDv4 check if the string is a UUID version 4.
+func IsUUIDv4(str string) bool {
+ return rxUUID4.MatchString(str)
+}
+
+// IsUUIDv5 check if the string is a UUID version 5.
+func IsUUIDv5(str string) bool {
+ return rxUUID5.MatchString(str)
+}
+
+// IsUUID check if the string is a UUID (version 3, 4 or 5).
+func IsUUID(str string) bool {
+ return rxUUID.MatchString(str)
+}
+
+// IsCreditCard checks if the string is a credit card number (digits are validated with the Luhn checksum).
+func IsCreditCard(str string) bool {
+ r, _ := regexp.Compile("[^0-9]+")
+ sanitized := r.ReplaceAll([]byte(str), []byte(""))
+ if !rxCreditCard.MatchString(string(sanitized)) {
+ return false
+ }
+ var sum int64
+ var digit string
+ var tmpNum int64
+ var shouldDouble bool
+ for i := len(sanitized) - 1; i >= 0; i-- {
+ digit = string(sanitized[i:(i + 1)])
+ tmpNum, _ = ToInt(digit)
+ if shouldDouble {
+ tmpNum *= 2
+ if tmpNum >= 10 {
+ sum += ((tmpNum % 10) + 1)
+ } else {
+ sum += tmpNum
+ }
+ } else {
+ sum += tmpNum
+ }
+ shouldDouble = !shouldDouble
+ }
+
+ if sum%10 == 0 {
+ return true
+ }
+ return false
+}
+
+// IsISBN10 check if the string is an ISBN version 10.
+func IsISBN10(str string) bool {
+ return IsISBN(str, 10)
+}
+
+// IsISBN13 check if the string is an ISBN version 13.
+func IsISBN13(str string) bool {
+ return IsISBN(str, 13)
+}
+
+// IsISBN checks if the string is an ISBN (version 10 or 13).
+// If the version value is not equal to 10 or 13, both variants will be checked.
+func IsISBN(str string, version int) bool {
+ r, _ := regexp.Compile("[\\s-]+")
+ sanitized := r.ReplaceAll([]byte(str), []byte(""))
+ var checksum int32
+ var i int32
+ if version == 10 {
+ if !rxISBN10.MatchString(string(sanitized)) {
+ return false
+ }
+ for i = 0; i < 9; i++ {
+ checksum += (i + 1) * int32(sanitized[i]-'0')
+ }
+ if sanitized[9] == 'X' {
+ checksum += 10 * 10
+ } else {
+ checksum += 10 * int32(sanitized[9]-'0')
+ }
+ if checksum%11 == 0 {
+ return true
+ }
+ return false
+ } else if version == 13 {
+ if !rxISBN13.MatchString(string(sanitized)) {
+ return false
+ }
+ factor := []int32{1, 3}
+ for i = 0; i < 12; i++ {
+ checksum += factor[i%2] * int32(sanitized[i]-'0')
+ }
+ if (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 {
+ return true
+ }
+ return false
+ }
+ return IsISBN(str, 10) || IsISBN(str, 13)
+}
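+
+// For instance (illustrative value): IsISBN13("978-3-16-148410-0") returns true,
+// while IsISBN("978-3-16-148410-0", 0) checks both the 10- and 13-digit variants.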
+
+// IsJSON check if the string is valid JSON (note: uses json.Unmarshal).
+func IsJSON(str string) bool {
+ var js json.RawMessage
+ return json.Unmarshal([]byte(str), &js) == nil
+}
+
+// IsMultibyte check if the string contains one or more multibyte chars. Empty string is valid.
+func IsMultibyte(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxMultibyte.MatchString(str)
+}
+
+// IsASCII check if the string contains ASCII chars only. Empty string is valid.
+func IsASCII(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxASCII.MatchString(str)
+}
+
+// IsPrintableASCII check if the string contains printable ASCII chars only. Empty string is valid.
+func IsPrintableASCII(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxPrintableASCII.MatchString(str)
+}
+
+// IsFullWidth check if the string contains any full-width chars. Empty string is valid.
+func IsFullWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxFullWidth.MatchString(str)
+}
+
+// IsHalfWidth check if the string contains any half-width chars. Empty string is valid.
+func IsHalfWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHalfWidth.MatchString(str)
+}
+
+// IsVariableWidth check if the string contains a mixture of full and half-width chars. Empty string is valid.
+func IsVariableWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
+}
+
+// IsBase64 check if a string is base64 encoded.
+func IsBase64(str string) bool {
+ return rxBase64.MatchString(str)
+}
+
+// IsFilePath checks if a string is a Win or Unix file path and returns its type.
+func IsFilePath(str string) (bool, int) {
+ if rxWinPath.MatchString(str) {
+ //check windows path limit see:
+ // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
+ if len(str[3:]) > 32767 {
+ return false, Win
+ }
+ return true, Win
+ } else if rxUnixPath.MatchString(str) {
+ return true, Unix
+ }
+ return false, Unknown
+}
+
+// IsDataURI checks if a string is a base64-encoded data URI, such as an image.
+func IsDataURI(str string) bool {
+ dataURI := strings.Split(str, ",")
+ if len(dataURI) < 2 || !rxDataURI.MatchString(dataURI[0]) {
+ return false
+ }
+ return IsBase64(dataURI[1])
+}
+
+// IsISO3166Alpha2 checks if a string is a valid two-letter country code.
+func IsISO3166Alpha2(str string) bool {
+ for _, entry := range ISO3166List {
+ if str == entry.Alpha2Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsISO3166Alpha3 checks if a string is a valid three-letter country code.
+func IsISO3166Alpha3(str string) bool {
+ for _, entry := range ISO3166List {
+ if str == entry.Alpha3Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsDNSName will validate the given string as a DNS name
+func IsDNSName(str string) bool {
+ if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 {
+ // constraints already violated
+ return false
+ }
+ return rxDNSName.MatchString(str)
+}
+
+// IsDialString validates the given string for usage with the various Dial() functions
+func IsDialString(str string) bool {
+
+ if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) {
+ return true
+ }
+
+ return false
+}
+
+// IsIP checks if a string is either IP version 4 or 6.
+func IsIP(str string) bool {
+ return net.ParseIP(str) != nil
+}
+
+// IsPort checks if a string represents a valid port
+func IsPort(str string) bool {
+ if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 {
+ return true
+ }
+ return false
+}
+
+// IsIPv4 check if the string is an IP version 4.
+func IsIPv4(str string) bool {
+ ip := net.ParseIP(str)
+ return ip != nil && strings.Contains(str, ".")
+}
+
+// IsIPv6 check if the string is an IP version 6.
+func IsIPv6(str string) bool {
+ ip := net.ParseIP(str)
+ return ip != nil && strings.Contains(str, ":")
+}
+
+// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6).
+func IsCIDR(str string) bool {
+ _, _, err := net.ParseCIDR(str)
+ return err == nil
+}
+
+// IsMAC checks if a string is a valid MAC address.
+// Possible MAC formats:
+// 01:23:45:67:89:ab
+// 01:23:45:67:89:ab:cd:ef
+// 01-23-45-67-89-ab
+// 01-23-45-67-89-ab-cd-ef
+// 0123.4567.89ab
+// 0123.4567.89ab.cdef
+func IsMAC(str string) bool {
+ _, err := net.ParseMAC(str)
+ return err == nil
+}
+
+// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name
+func IsHost(str string) bool {
+ return IsIP(str) || IsDNSName(str)
+}
+
+// IsMongoID check if the string is a valid hex-encoded representation of a MongoDB ObjectId.
+func IsMongoID(str string) bool {
+ return rxHexadecimal.MatchString(str) && (len(str) == 24)
+}
+
+// IsLatitude check if a string is valid latitude.
+func IsLatitude(str string) bool {
+ return rxLatitude.MatchString(str)
+}
+
+// IsLongitude check if a string is valid longitude.
+func IsLongitude(str string) bool {
+ return rxLongitude.MatchString(str)
+}
+
+// ValidateStruct validates struct fields using their `valid` tags.
+// The result will be equal to `false` if there are any errors.
+func ValidateStruct(s interface{}) (bool, error) {
+ if s == nil {
+ return true, nil
+ }
+ result := true
+ var err error
+ val := reflect.ValueOf(s)
+ if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ // we only accept structs
+ if val.Kind() != reflect.Struct {
+ return false, fmt.Errorf("function only accepts structs; got %s", val.Kind())
+ }
+ var errs Errors
+ for i := 0; i < val.NumField(); i++ {
+ valueField := val.Field(i)
+ typeField := val.Type().Field(i)
+ if typeField.PkgPath != "" {
+ continue // Private field
+ }
+ resultField, err2 := typeCheck(valueField, typeField, val)
+ if err2 != nil {
+ errs = append(errs, err2)
+ }
+ result = result && resultField
+ }
+ if len(errs) > 0 {
+ err = errs
+ }
+ return result, err
+}
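+
+// A minimal usage sketch (the struct and its field names are illustrative, not
+// part of this package):
+//
+//	type post struct {
+//		Title    string `valid:"alphanum,required"`
+//		Message  string `valid:"ascii"`
+//		AuthorIP string `valid:"ipv4"`
+//	}
+//	ok, err := ValidateStruct(post{Title: "My Example Post", Message: "duck duck go", AuthorIP: "123.234.54.3"})
+//	// ok is false and err is non-nil if any tagged field fails its validators.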
+
+// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""}
+func parseTagIntoMap(tag string) tagOptionsMap {
+ optionsMap := make(tagOptionsMap)
+ options := strings.SplitN(tag, ",", -1)
+ for _, option := range options {
+ validationOptions := strings.Split(option, "~")
+ if !isValidTag(validationOptions[0]) {
+ continue
+ }
+ if len(validationOptions) == 2 {
+ optionsMap[validationOptions[0]] = validationOptions[1]
+ } else {
+ optionsMap[validationOptions[0]] = ""
+ }
+ }
+ return optionsMap
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// IsSSN will validate the given string as a U.S. Social Security Number
+func IsSSN(str string) bool {
+ if str == "" || len(str) != 11 {
+ return false
+ }
+ return rxSSN.MatchString(str)
+}
+
+// IsSemver checks if the string is a valid semantic version.
+func IsSemver(str string) bool {
+ return rxSemver.MatchString(str)
+}
+
+// IsTime checks if the string can be parsed with the given time layout (see time.Parse).
+func IsTime(str string, format string) bool {
+ _, err := time.Parse(format, str)
+ return err == nil
+}
+
+// IsRFC3339 checks if the string is a valid RFC 3339 timestamp.
+func IsRFC3339(str string) bool {
+ return IsTime(str, time.RFC3339)
+}
+
+// ByteLength checks whether the string's length in bytes falls between the min and max params.
+func ByteLength(str string, params ...string) bool {
+ if len(params) == 2 {
+ min, _ := ToInt(params[0])
+ max, _ := ToInt(params[1])
+ return len(str) >= int(min) && len(str) <= int(max)
+ }
+
+ return false
+}
+
+// RuneLength checks the string's length in runes.
+// Alias for StringLength.
+func RuneLength(str string, params ...string) bool {
+ return StringLength(str, params...)
+}
+
+// StringMatches checks if a string matches a given pattern.
+func StringMatches(s string, params ...string) bool {
+ if len(params) == 1 {
+ pattern := params[0]
+ return Matches(s, pattern)
+ }
+ return false
+}
+
+// StringLength checks the string's length in runes (multi-byte aware) against the min and max params.
+func StringLength(str string, params ...string) bool {
+
+ if len(params) == 2 {
+ strLength := utf8.RuneCountInString(str)
+ min, _ := ToInt(params[0])
+ max, _ := ToInt(params[1])
+ return strLength >= int(min) && strLength <= int(max)
+ }
+
+ return false
+}
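+
+// Sketch of the param-tag flow (assuming a "stringlength" entry is registered in
+// ParamTagMap/ParamTagRegexMap): a struct tag such as `valid:"stringlength(2|36)"`
+// is matched by the corresponding regex and this function is then called with
+// params = []string{"2", "36"}.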
+
+func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) {
+ if requiredOption, isRequired := options["required"]; isRequired {
+ if len(requiredOption) > 0 {
+ return false, Error{t.Name, fmt.Errorf(requiredOption), true}
+ }
+ return false, Error{t.Name, fmt.Errorf("non zero value required"), false}
+ } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional {
+ return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false}
+ }
+ // not required and empty is valid
+ return true, nil
+}
+
+func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value) (bool, error) {
+ if !v.IsValid() {
+ return false, nil
+ }
+
+ tag := t.Tag.Get(tagName)
+
+ // Check if the field should be ignored
+ switch tag {
+ case "":
+ if !fieldsRequiredByDefault {
+ return true, nil
+ }
+ return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false}
+ case "-":
+ return true, nil
+ }
+
+ options := parseTagIntoMap(tag)
+ var customTypeErrors Errors
+ var customTypeValidatorsExist bool
+ for validatorName, customErrorMessage := range options {
+ if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok {
+ customTypeValidatorsExist = true
+ if result := validatefunc(v.Interface(), o.Interface()); !result {
+ if len(customErrorMessage) > 0 {
+ customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf(customErrorMessage), CustomErrorMessageExists: true})
+ continue
+ }
+ customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false})
+ }
+ }
+ }
+ if customTypeValidatorsExist {
+ if len(customTypeErrors.Errors()) > 0 {
+ return false, customTypeErrors
+ }
+ return true, nil
+ }
+
+ if isEmptyValue(v) {
+ // an empty value is not validated, check only required
+ return checkRequired(v, t, options)
+ }
+
+ switch v.Kind() {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Float32, reflect.Float64,
+ reflect.String:
+ // for each tag option check the map of validator functions
+ for validator, customErrorMessage := range options {
+ var negate bool
+ customMsgExists := (len(customErrorMessage) > 0)
+ // Check whether the tag looks like '!something' or 'something'
+ if validator[0] == '!' {
+ validator = string(validator[1:])
+ negate = true
+ }
+
+ // Check for param validators
+ for key, value := range ParamTagRegexMap {
+ ps := value.FindStringSubmatch(validator)
+ if len(ps) > 0 {
+ if validatefunc, ok := ParamTagMap[key]; ok {
+ switch v.Kind() {
+ case reflect.String:
+ field := fmt.Sprint(v) // make value into string, then validate with regex
+ if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) {
+ var err error
+ if !negate {
+ if customMsgExists {
+ err = fmt.Errorf(customErrorMessage)
+ } else {
+ err = fmt.Errorf("%s does not validate as %s", field, validator)
+ }
+
+ } else {
+ if customMsgExists {
+ err = fmt.Errorf(customErrorMessage)
+ } else {
+ err = fmt.Errorf("%s does validate as %s", field, validator)
+ }
+ }
+ return false, Error{t.Name, err, customMsgExists}
+ }
+ default:
+ // type not yet supported, fail
+ return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false}
+ }
+ }
+ }
+ }
+
+ if validatefunc, ok := TagMap[validator]; ok {
+ switch v.Kind() {
+ case reflect.String:
+ field := fmt.Sprint(v) // make value into string, then validate with regex
+ if result := validatefunc(field); !result && !negate || result && negate {
+ var err error
+
+ if !negate {
+ if customMsgExists {
+ err = fmt.Errorf(customErrorMessage)
+ } else {
+ err = fmt.Errorf("%s does not validate as %s", field, validator)
+ }
+ } else {
+ if customMsgExists {
+ err = fmt.Errorf(customErrorMessage)
+ } else {
+ err = fmt.Errorf("%s does validate as %s", field, validator)
+ }
+ }
+ return false, Error{t.Name, err, customMsgExists}
+ }
+ default:
+ //Not Yet Supported Types (Fail here!)
+ err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v)
+ return false, Error{t.Name, err, false}
+ }
+ }
+ }
+ return true, nil
+ case reflect.Map:
+ if v.Type().Key().Kind() != reflect.String {
+ return false, &UnsupportedTypeError{v.Type()}
+ }
+ var sv stringValues
+ sv = v.MapKeys()
+ sort.Sort(sv)
+ result := true
+ for _, k := range sv {
+ resultItem, err := ValidateStruct(v.MapIndex(k).Interface())
+ if err != nil {
+ return false, err
+ }
+ result = result && resultItem
+ }
+ return result, nil
+ case reflect.Slice:
+ result := true
+ for i := 0; i < v.Len(); i++ {
+ var resultItem bool
+ var err error
+ if v.Index(i).Kind() != reflect.Struct {
+ resultItem, err = typeCheck(v.Index(i), t, o)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ resultItem, err = ValidateStruct(v.Index(i).Interface())
+ if err != nil {
+ return false, err
+ }
+ }
+ result = result && resultItem
+ }
+ return result, nil
+ case reflect.Array:
+ result := true
+ for i := 0; i < v.Len(); i++ {
+ var resultItem bool
+ var err error
+ if v.Index(i).Kind() != reflect.Struct {
+ resultItem, err = typeCheck(v.Index(i), t, o)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ resultItem, err = ValidateStruct(v.Index(i).Interface())
+ if err != nil {
+ return false, err
+ }
+ }
+ result = result && resultItem
+ }
+ return result, nil
+ case reflect.Interface:
+ // If the value is an interface then encode its element
+ if v.IsNil() {
+ return true, nil
+ }
+ return ValidateStruct(v.Interface())
+ case reflect.Ptr:
+ // If the value is a pointer then check its element
+ if v.IsNil() {
+ return true, nil
+ }
+ return typeCheck(v.Elem(), t, o)
+ case reflect.Struct:
+ return ValidateStruct(v.Interface())
+ default:
+ return false, &UnsupportedTypeError{v.Type()}
+ }
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String, reflect.Array:
+ return v.Len() == 0
+ case reflect.Map, reflect.Slice:
+ return v.Len() == 0 || v.IsNil()
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+
+ return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface())
+}
+
+// ErrorByField returns the error for the specified field of the struct
+// validated by ValidateStruct, or an empty string if there are no errors
+// or the field doesn't exist or has no errors.
+func ErrorByField(e error, field string) string {
+ if e == nil {
+ return ""
+ }
+ return ErrorsByField(e)[field]
+}
+
+// ErrorsByField returns a map of errors of the struct validated
+// by ValidateStruct, or an empty map if there are no errors.
+func ErrorsByField(e error) map[string]string {
+ m := make(map[string]string)
+ if e == nil {
+ return m
+ }
+ // prototype for ValidateStruct
+
+ switch err := e.(type) {
+ case Error:
+ m[err.Name] = err.Err.Error()
+ case Errors:
+ for _, item := range err.Errors() {
+ n := ErrorsByField(item)
+ for k, v := range n {
+ m[k] = v
+ }
+ }
+ }
+
+ return m
+}
+
+// Error returns string equivalent for reflect.Type
+func (e *UnsupportedTypeError) Error() string {
+ return "validator: unsupported type: " + e.Type.String()
+}
+
+func (sv stringValues) Len() int { return len(sv) }
+func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv stringValues) get(i int) string { return sv[i].String() }
diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml
new file mode 100644
index 0000000..4840449
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/wercker.yml
@@ -0,0 +1,15 @@
+box: wercker/golang
+build:
+ steps:
+ - setup-go-workspace
+
+ - script:
+ name: go get
+ code: |
+ go version
+ go get -t ./...
+
+ - script:
+ name: go test
+ code: |
+ go test -race ./...
diff --git a/vendor/github.com/gizak/termui/LICENSE b/vendor/github.com/gizak/termui/LICENSE
new file mode 100644
index 0000000..311ccc7
--- /dev/null
+++ b/vendor/github.com/gizak/termui/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Zack Guo
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/gizak/termui/README.md b/vendor/github.com/gizak/termui/README.md
new file mode 100644
index 0000000..d5f3d9a
--- /dev/null
+++ b/vendor/github.com/gizak/termui/README.md
@@ -0,0 +1,151 @@
+# termui [![Build Status](https://travis-ci.org/gizak/termui.svg?branch=master)](https://travis-ci.org/gizak/termui) [![Doc Status](https://godoc.org/github.com/gizak/termui?status.png)](https://godoc.org/github.com/gizak/termui)
+
+
+
+`termui` is a cross-platform, easy-to-compile, and fully-customizable terminal dashboard. It is inspired by [blessed-contrib](https://github.com/yaronn/blessed-contrib), but purely in Go.
+
+Version v2 has now arrived! It brings a new event system, a new theme system, a new `Buffer` interface and more fine-grained coloured text rendering. (Some docs are still missing, but they will be completed soon!)
+
+## Installation
+
+`master` mirrors the v2 branch; to install:
+
+ go get -u github.com/gizak/termui
+
+It is recommended to use locked dependencies via [glide](https://glide.sh): move to the `termui` source directory, then run `glide up`.
+
+For compatibility reasons, you can choose to install the legacy version of `termui`:
+
+ go get gopkg.in/gizak/termui.v1
+
+## Usage
+
+### Layout
+
+To use `termui`, the very first thing you may want to know is how to manage layout. `termui` offers two ways of doing this, known as absolute layout and grid layout.
+
+__Absolute layout__
+
+Each widget has an underlying block structure, which is basically a box model with border, label and padding properties. A widget's border can be hidden or displayed (along with its border label), and you can pick different foreground/background colours for the border as well. To display such a widget at a specific location in the terminal window, assign `.X`, `.Y`, `.Height` and `.Width` values to each widget before sending it to `.Render`. Let's demonstrate this with a code snippet:
+
+`````go
+ import ui "github.com/gizak/termui" // <- ui shortcut, optional
+
+ func main() {
+ err := ui.Init()
+ if err != nil {
+ panic(err)
+ }
+ defer ui.Close()
+
+ p := ui.NewPar(":PRESS q TO QUIT DEMO")
+ p.Height = 3
+ p.Width = 50
+ p.TextFgColor = ui.ColorWhite
+ p.BorderLabel = "Text Box"
+ p.BorderFg = ui.ColorCyan
+
+ g := ui.NewGauge()
+ g.Percent = 50
+ g.Width = 50
+ g.Height = 3
+ g.Y = 11
+ g.BorderLabel = "Gauge"
+ g.BarColor = ui.ColorRed
+ g.BorderFg = ui.ColorWhite
+ g.BorderLabelFg = ui.ColorCyan
+
+ ui.Render(p, g) // feel free to call Render; it's async and non-blocking
+
+ // event handler...
+ }
+`````
+
+Note that components can overlap (I'd rather call this a feature...); `Render(rs ...Renderer)` renders its args from left to right, so components later in the list are drawn on top of earlier ones.
+
+__Grid layout:__
+
+
+
+Grid layout uses a [12 columns grid system](http://www.w3schools.com/bootstrap/bootstrap_grid_system.asp) with an expressive syntax. To use `Grid`, all we need to do is build a widget tree consisting of `Row`s and `Col`s (actually a `Col` is also a `Row`, just with a widget endpoint attached).
+
+```go
+ import ui "github.com/gizak/termui"
+ // init and create widgets...
+
+ // build
+ ui.Body.AddRows(
+ ui.NewRow(
+ ui.NewCol(6, 0, widget0),
+ ui.NewCol(6, 0, widget1)),
+ ui.NewRow(
+ ui.NewCol(3, 0, widget2),
+ ui.NewCol(3, 0, widget30, widget31, widget32),
+ ui.NewCol(6, 0, widget4)))
+
+ // calculate layout
+ ui.Body.Align()
+
+ ui.Render(ui.Body)
+```
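+
+A common follow-up (a sketch assuming termui's `TermWidth` and `Clear` helpers and the `/sys/wnd/resize` path shown in the Events section below) is to re-align the grid whenever the window is resized:
+
+```go
+ ui.Handle("/sys/wnd/resize", func(e ui.Event) {
+  ui.Body.Width = ui.TermWidth()
+  ui.Body.Align()
+  ui.Clear()
+  ui.Render(ui.Body)
+ })
+```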
+
+### Events
+
+`termui` ships with an http-like event mux handling system. All events are channeled up from different sources (typing, clicks, window resize, custom events) and then encoded as a universal `Event` object. `Event.Path` indicates the event type and `Event.Data` stores the event data struct. Adding a handler for a certain event is as easy as the following:
+
+```go
+ // handle key q pressing
+ ui.Handle("/sys/kbd/q", func(ui.Event) {
+ // press q to quit
+ ui.StopLoop()
+ })
+
+ ui.Handle("/sys/kbd/C-x", func(ui.Event) {
+ // handle Ctrl + x combination
+ })
+
+ ui.Handle("/sys/kbd", func(ui.Event) {
+ // handle all other key pressing
+ })
+
+ // handle a 1s timer
+ ui.Handle("/timer/1s", func(e ui.Event) {
+ t := e.Data.(ui.EvtTimer)
+ // t is a EvtTimer
+ if t.Count%2 == 0 {
+ // do something
+ }
+ })
+
+ ui.Loop() // block until StopLoop is called
+```
+
+### Widgets
+
+Demo code for each widget:
+
+- [Par](https://github.com/gizak/termui/blob/master/_example/par.go)
+- [List](https://github.com/gizak/termui/blob/master/_example/list.go)
+- [Gauge](https://github.com/gizak/termui/blob/master/_example/gauge.go)
+- [Line Chart](https://github.com/gizak/termui/blob/master/_example/linechart.go)
+- [Bar Chart](https://github.com/gizak/termui/blob/master/_example/barchart.go)
+- [Multi Bar Chart](https://github.com/gizak/termui/blob/master/_example/mbarchart.go)
+- [Sparklines](https://github.com/gizak/termui/blob/master/_example/sparklines.go)
+- [Table](https://github.com/gizak/termui/blob/master/_example/table.go)
+
+## GoDoc
+
+[godoc](https://godoc.org/github.com/gizak/termui)
+
+## TODO
+
+- [x] Grid layout
+- [x] Event system
+- [x] Canvas widget
+- [x] Refine APIs
+- [ ] Focusable widgets
+
+## Changelog
+
+## License
+This library is under the [MIT License](http://opensource.org/licenses/MIT)
diff --git a/vendor/github.com/gizak/termui/barchart.go b/vendor/github.com/gizak/termui/barchart.go
new file mode 100644
index 0000000..6560c8b
--- /dev/null
+++ b/vendor/github.com/gizak/termui/barchart.go
@@ -0,0 +1,149 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import "fmt"
+
+// BarChart creates multiple bars in a widget:
+/*
+ bc := termui.NewBarChart()
+ data := []int{3, 2, 5, 3, 9, 5}
+ bclabels := []string{"S0", "S1", "S2", "S3", "S4", "S5"}
+ bc.BorderLabel = "Bar Chart"
+ bc.Data = data
+ bc.Width = 26
+ bc.Height = 10
+ bc.DataLabels = bclabels
+ bc.TextColor = termui.ColorGreen
+ bc.BarColor = termui.ColorRed
+ bc.NumColor = termui.ColorYellow
+*/
+type BarChart struct {
+ Block
+ BarColor Attribute
+ TextColor Attribute
+ NumColor Attribute
+ Data []int
+ DataLabels []string
+ BarWidth int
+ BarGap int
+ CellChar rune
+ labels [][]rune
+ dataNum [][]rune
+ numBar int
+ scale float64
+ max int
+}
+
+// NewBarChart returns a new *BarChart with current theme.
+func NewBarChart() *BarChart {
+ bc := &BarChart{Block: *NewBlock()}
+ bc.BarColor = ThemeAttr("barchart.bar.bg")
+ bc.NumColor = ThemeAttr("barchart.num.fg")
+ bc.TextColor = ThemeAttr("barchart.text.fg")
+ bc.BarGap = 1
+ bc.BarWidth = 3
+ bc.CellChar = ' '
+ return bc
+}
+
+func (bc *BarChart) layout() {
+ bc.numBar = bc.innerArea.Dx() / (bc.BarGap + bc.BarWidth)
+ bc.labels = make([][]rune, bc.numBar)
+ bc.dataNum = make([][]rune, len(bc.Data))
+
+ for i := 0; i < bc.numBar && i < len(bc.DataLabels) && i < len(bc.Data); i++ {
+ bc.labels[i] = trimStr2Runes(bc.DataLabels[i], bc.BarWidth)
+ n := bc.Data[i]
+ s := fmt.Sprint(n)
+ bc.dataNum[i] = trimStr2Runes(s, bc.BarWidth)
+ }
+
+ // Do not seed bc.max from bc.Data[0]: when Data is empty that would panic
+ // with "index out of range". Seed with a negative value instead so the loop
+ // below auto-populates the maximum.
+ if bc.max == 0 {
+ bc.max = -1
+ }
+ for i := 0; i < len(bc.Data); i++ {
+ if bc.max < bc.Data[i] {
+ bc.max = bc.Data[i]
+ }
+ }
+ bc.scale = float64(bc.max) / float64(bc.innerArea.Dy()-1)
+}
+
+// SetMax sets an explicit maximum value for the chart scale; when left unset it
+// is auto-computed from Data during layout.
+func (bc *BarChart) SetMax(max int) {
+ if max > 0 {
+ bc.max = max
+ }
+}
+
+// Buffer implements Bufferer interface.
+func (bc *BarChart) Buffer() Buffer {
+ buf := bc.Block.Buffer()
+ bc.layout()
+
+ for i := 0; i < bc.numBar && i < len(bc.Data) && i < len(bc.DataLabels); i++ {
+ h := int(float64(bc.Data[i]) / bc.scale)
+ oftX := i * (bc.BarWidth + bc.BarGap)
+
+ barBg := bc.Bg
+ barFg := bc.BarColor
+
+ if bc.CellChar == ' ' {
+ barBg = bc.BarColor
+ barFg = ColorDefault
+ if bc.BarColor == ColorDefault { // the same as above
+ barBg |= AttrReverse
+ }
+ }
+
+ // plot bar
+ for j := 0; j < bc.BarWidth; j++ {
+ for k := 0; k < h; k++ {
+ c := Cell{
+ Ch: bc.CellChar,
+ Bg: barBg,
+ Fg: barFg,
+ }
+
+ x := bc.innerArea.Min.X + i*(bc.BarWidth+bc.BarGap) + j
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 - k
+ buf.Set(x, y, c)
+ }
+ }
+ // plot text
+ for j, k := 0, 0; j < len(bc.labels[i]); j++ {
+ w := charWidth(bc.labels[i][j])
+ c := Cell{
+ Ch: bc.labels[i][j],
+ Bg: bc.Bg,
+ Fg: bc.TextColor,
+ }
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 1
+ x := bc.innerArea.Min.X + oftX + k
+ buf.Set(x, y, c)
+ k += w
+ }
+ // plot num
+ for j := 0; j < len(bc.dataNum[i]); j++ {
+ c := Cell{
+ Ch: bc.dataNum[i][j],
+ Fg: bc.NumColor,
+ Bg: barBg,
+ }
+
+ if h == 0 {
+ c.Bg = bc.Bg
+ }
+ x := bc.innerArea.Min.X + oftX + (bc.BarWidth-len(bc.dataNum[i]))/2 + j
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2
+ buf.Set(x, y, c)
+ }
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/block.go b/vendor/github.com/gizak/termui/block.go
new file mode 100644
index 0000000..3e8571b
--- /dev/null
+++ b/vendor/github.com/gizak/termui/block.go
@@ -0,0 +1,240 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import "image"
+
+// Hline is a horizontal line.
+type Hline struct {
+ X int
+ Y int
+ Len int
+ Fg Attribute
+ Bg Attribute
+}
+
+// Vline is a vertical line.
+type Vline struct {
+ X int
+ Y int
+ Len int
+ Fg Attribute
+ Bg Attribute
+}
+
+// Buffer draws a horizontal line.
+func (l Hline) Buffer() Buffer {
+ if l.Len <= 0 {
+ return NewBuffer()
+ }
+ return NewFilledBuffer(l.X, l.Y, l.X+l.Len, l.Y+1, HORIZONTAL_LINE, l.Fg, l.Bg)
+}
+
+// Buffer draws a vertical line.
+func (l Vline) Buffer() Buffer {
+ if l.Len <= 0 {
+ return NewBuffer()
+ }
+ return NewFilledBuffer(l.X, l.Y, l.X+1, l.Y+l.Len, VERTICAL_LINE, l.Fg, l.Bg)
+}
+
+// Buffer draws a box border.
+func (b Block) drawBorder(buf Buffer) {
+ if !b.Border {
+ return
+ }
+
+ min := b.area.Min
+ max := b.area.Max
+
+ x0 := min.X
+ y0 := min.Y
+ x1 := max.X - 1
+ y1 := max.Y - 1
+
+ // draw lines
+ if b.BorderTop {
+ buf.Merge(Hline{x0, y0, x1 - x0, b.BorderFg, b.BorderBg}.Buffer())
+ }
+ if b.BorderBottom {
+ buf.Merge(Hline{x0, y1, x1 - x0, b.BorderFg, b.BorderBg}.Buffer())
+ }
+ if b.BorderLeft {
+ buf.Merge(Vline{x0, y0, y1 - y0, b.BorderFg, b.BorderBg}.Buffer())
+ }
+ if b.BorderRight {
+ buf.Merge(Vline{x1, y0, y1 - y0, b.BorderFg, b.BorderBg}.Buffer())
+ }
+
+ // draw corners
+ if b.BorderTop && b.BorderLeft && b.area.Dx() > 0 && b.area.Dy() > 0 {
+ buf.Set(x0, y0, Cell{TOP_LEFT, b.BorderFg, b.BorderBg})
+ }
+ if b.BorderTop && b.BorderRight && b.area.Dx() > 1 && b.area.Dy() > 0 {
+ buf.Set(x1, y0, Cell{TOP_RIGHT, b.BorderFg, b.BorderBg})
+ }
+ if b.BorderBottom && b.BorderLeft && b.area.Dx() > 0 && b.area.Dy() > 1 {
+ buf.Set(x0, y1, Cell{BOTTOM_LEFT, b.BorderFg, b.BorderBg})
+ }
+ if b.BorderBottom && b.BorderRight && b.area.Dx() > 1 && b.area.Dy() > 1 {
+ buf.Set(x1, y1, Cell{BOTTOM_RIGHT, b.BorderFg, b.BorderBg})
+ }
+}
+
+func (b Block) drawBorderLabel(buf Buffer) {
+ maxTxtW := b.area.Dx() - 2
+ tx := DTrimTxCls(DefaultTxBuilder.Build(b.BorderLabel, b.BorderLabelFg, b.BorderLabelBg), maxTxtW)
+
+ for i, w := 0, 0; i < len(tx); i++ {
+ buf.Set(b.area.Min.X+1+w, b.area.Min.Y, tx[i])
+ w += tx[i].Width()
+ }
+}
+
+// Block is a base struct for all other upper level widgets,
+// consider it as css: display:block.
+// Normally you do not need to create it manually.
+type Block struct {
+ area image.Rectangle
+ innerArea image.Rectangle
+ X int
+ Y int
+ Border bool
+ BorderFg Attribute
+ BorderBg Attribute
+ BorderLeft bool
+ BorderRight bool
+ BorderTop bool
+ BorderBottom bool
+ BorderLabel string
+ BorderLabelFg Attribute
+ BorderLabelBg Attribute
+ Display bool
+ Bg Attribute
+ Width int
+ Height int
+ PaddingTop int
+ PaddingBottom int
+ PaddingLeft int
+ PaddingRight int
+ id string
+ Float Align
+}
+
+// NewBlock returns a *Block which inherits styles from current theme.
+func NewBlock() *Block {
+ b := Block{}
+ b.Display = true
+ b.Border = true
+ b.BorderLeft = true
+ b.BorderRight = true
+ b.BorderTop = true
+ b.BorderBottom = true
+ b.BorderBg = ThemeAttr("border.bg")
+ b.BorderFg = ThemeAttr("border.fg")
+ b.BorderLabelBg = ThemeAttr("label.bg")
+ b.BorderLabelFg = ThemeAttr("label.fg")
+ b.Bg = ThemeAttr("block.bg")
+ b.Width = 2
+ b.Height = 2
+ b.id = GenId()
+ b.Float = AlignNone
+ return &b
+}
+
+func (b Block) Id() string {
+ return b.id
+}
+
+// Align computes box model
+func (b *Block) Align() {
+ // outer
+ b.area.Min.X = 0
+ b.area.Min.Y = 0
+ b.area.Max.X = b.Width
+ b.area.Max.Y = b.Height
+
+ // float
+ b.area = AlignArea(TermRect(), b.area, b.Float)
+ b.area = MoveArea(b.area, b.X, b.Y)
+
+ // inner
+ b.innerArea.Min.X = b.area.Min.X + b.PaddingLeft
+ b.innerArea.Min.Y = b.area.Min.Y + b.PaddingTop
+ b.innerArea.Max.X = b.area.Max.X - b.PaddingRight
+ b.innerArea.Max.Y = b.area.Max.Y - b.PaddingBottom
+
+ if b.Border {
+ if b.BorderLeft {
+ b.innerArea.Min.X++
+ }
+ if b.BorderRight {
+ b.innerArea.Max.X--
+ }
+ if b.BorderTop {
+ b.innerArea.Min.Y++
+ }
+ if b.BorderBottom {
+ b.innerArea.Max.Y--
+ }
+ }
+}
+
+// InnerBounds returns the internal bounds of the block after aligning and
+// calculating the padding and border, if any.
+func (b *Block) InnerBounds() image.Rectangle {
+ b.Align()
+ return b.innerArea
+}
+
+// Buffer implements Bufferer interface.
+// Draw background and border (if any).
+func (b *Block) Buffer() Buffer {
+ b.Align()
+
+ buf := NewBuffer()
+ buf.SetArea(b.area)
+ buf.Fill(' ', ColorDefault, b.Bg)
+
+ b.drawBorder(buf)
+ b.drawBorderLabel(buf)
+
+ return buf
+}
+
+// GetHeight implements GridBufferer.
+// It returns current height of the block.
+func (b Block) GetHeight() int {
+ return b.Height
+}
+
+// SetX implements GridBufferer interface, which sets block's x position.
+func (b *Block) SetX(x int) {
+ b.X = x
+}
+
+// SetY implements GridBufferer interface, it sets y position for block.
+func (b *Block) SetY(y int) {
+ b.Y = y
+}
+
+// SetWidth implements GridBuffer interface, it sets block's width.
+func (b *Block) SetWidth(w int) {
+ b.Width = w
+}
+
+func (b Block) InnerWidth() int {
+ return b.innerArea.Dx()
+}
+
+func (b Block) InnerHeight() int {
+ return b.innerArea.Dy()
+}
+
+func (b Block) InnerX() int {
+ return b.innerArea.Min.X
+}
+
+func (b Block) InnerY() int { return b.innerArea.Min.Y }
diff --git a/vendor/github.com/gizak/termui/block_common.go b/vendor/github.com/gizak/termui/block_common.go
new file mode 100644
index 0000000..6438bf2
--- /dev/null
+++ b/vendor/github.com/gizak/termui/block_common.go
@@ -0,0 +1,20 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+// +build !windows
+
+package termui
+
+const TOP_RIGHT = '┐'
+const VERTICAL_LINE = '│'
+const HORIZONTAL_LINE = '─'
+const TOP_LEFT = '┌'
+const BOTTOM_RIGHT = '┘'
+const BOTTOM_LEFT = '└'
+const VERTICAL_LEFT = '┤'
+const VERTICAL_RIGHT = '├'
+const HORIZONTAL_DOWN = '┬'
+const HORIZONTAL_UP = '┴'
+const QUOTA_LEFT = '«'
+const QUOTA_RIGHT = '»'
diff --git a/vendor/github.com/gizak/termui/block_windows.go b/vendor/github.com/gizak/termui/block_windows.go
new file mode 100644
index 0000000..a4fba77
--- /dev/null
+++ b/vendor/github.com/gizak/termui/block_windows.go
@@ -0,0 +1,14 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+// +build windows
+
+package termui
+
+const TOP_RIGHT = '+'
+const VERTICAL_LINE = '|'
+const HORIZONTAL_LINE = '-'
+const TOP_LEFT = '+'
+const BOTTOM_RIGHT = '+'
+const BOTTOM_LEFT = '+'
diff --git a/vendor/github.com/gizak/termui/buffer.go b/vendor/github.com/gizak/termui/buffer.go
new file mode 100644
index 0000000..9e3a973
--- /dev/null
+++ b/vendor/github.com/gizak/termui/buffer.go
@@ -0,0 +1,106 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import "image"
+
+// Cell is a rune with assigned Fg and Bg
+type Cell struct {
+ Ch rune
+ Fg Attribute
+ Bg Attribute
+}
+
+// Buffer is a renderable rectangle cell data container.
+type Buffer struct {
+ Area image.Rectangle // selected drawing area
+ CellMap map[image.Point]Cell
+}
+
+// At returns the cell at (x,y).
+func (b Buffer) At(x, y int) Cell {
+ return b.CellMap[image.Pt(x, y)]
+}
+
+// Set assigns a char to (x,y)
+func (b Buffer) Set(x, y int, c Cell) {
+ b.CellMap[image.Pt(x, y)] = c
+}
+
+// Bounds returns the domain for which At can return non-zero color.
+func (b Buffer) Bounds() image.Rectangle {
+ x0, y0, x1, y1 := 0, 0, 0, 0
+ for p := range b.CellMap {
+ if p.X > x1 {
+ x1 = p.X
+ }
+ if p.X < x0 {
+ x0 = p.X
+ }
+ if p.Y > y1 {
+ y1 = p.Y
+ }
+ if p.Y < y0 {
+ y0 = p.Y
+ }
+ }
+ return image.Rect(x0, y0, x1+1, y1+1)
+}
+
+// SetArea assigns a new rect area to Buffer b.
+func (b *Buffer) SetArea(r image.Rectangle) {
+ b.Area.Max = r.Max
+ b.Area.Min = r.Min
+}
+
+// Sync sets the drawing area to the buffer's bounds.
+func (b *Buffer) Sync() {
+ b.SetArea(b.Bounds())
+}
+
+// NewCell returns a new cell
+func NewCell(ch rune, fg, bg Attribute) Cell {
+ return Cell{ch, fg, bg}
+}
+
+// Merge merges bs Buffers onto b
+func (b *Buffer) Merge(bs ...Buffer) {
+ for _, buf := range bs {
+ for p, v := range buf.CellMap {
+ b.Set(p.X, p.Y, v)
+ }
+ b.SetArea(b.Area.Union(buf.Area))
+ }
+}
+
+// NewBuffer returns a new Buffer
+func NewBuffer() Buffer {
+ return Buffer{
+ CellMap: make(map[image.Point]Cell),
+ Area: image.Rectangle{}}
+}
+
+// Fill fills the Buffer b with ch,fg and bg.
+func (b Buffer) Fill(ch rune, fg, bg Attribute) {
+ for x := b.Area.Min.X; x < b.Area.Max.X; x++ {
+ for y := b.Area.Min.Y; y < b.Area.Max.Y; y++ {
+ b.Set(x, y, Cell{ch, fg, bg})
+ }
+ }
+}
+
+// NewFilledBuffer returns a new Buffer filled with ch, fg and bg.
+func NewFilledBuffer(x0, y0, x1, y1 int, ch rune, fg, bg Attribute) Buffer {
+ buf := NewBuffer()
+ buf.Area.Min = image.Pt(x0, y0)
+ buf.Area.Max = image.Pt(x1, y1)
+
+ for x := buf.Area.Min.X; x < buf.Area.Max.X; x++ {
+ for y := buf.Area.Min.Y; y < buf.Area.Max.Y; y++ {
+ buf.Set(x, y, Cell{ch, fg, bg})
+ }
+ }
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/canvas.go b/vendor/github.com/gizak/termui/canvas.go
new file mode 100644
index 0000000..6d2513e
--- /dev/null
+++ b/vendor/github.com/gizak/termui/canvas.go
@@ -0,0 +1,72 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+/*
+dots:
+ ,___,
+ |1 4|
+ |2 5|
+ |3 6|
+ |7 8|
+ `````
+*/
+
+var brailleBase = '\u2800'
+
+var brailleOftMap = [4][2]rune{
+ {'\u0001', '\u0008'},
+ {'\u0002', '\u0010'},
+ {'\u0004', '\u0020'},
+ {'\u0040', '\u0080'}}
+
+// Canvas contains drawing map: i,j -> rune
+type Canvas map[[2]int]rune
+
+// NewCanvas returns an empty Canvas
+func NewCanvas() Canvas {
+ return make(map[[2]int]rune)
+}
+
+func chOft(x, y int) rune {
+ return brailleOftMap[y%4][x%2]
+}
+
+func (c Canvas) rawCh(x, y int) rune {
+ if ch, ok := c[[2]int{x, y}]; ok {
+ return ch
+ }
+ return '\u0000' //brailleOffset
+}
+
+// return coordinate in terminal
+func chPos(x, y int) (int, int) {
+ return y / 4, x / 2
+}
+
+// Set sets a point (x,y) in the virtual coordinate
+func (c Canvas) Set(x, y int) {
+ i, j := chPos(x, y)
+ ch := c.rawCh(i, j)
+ ch |= chOft(x, y)
+ c[[2]int{i, j}] = ch
+}
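+
+// For example, Set(3, 5) stores its dot in the braille cell keyed by
+// chPos(3, 5) == (1, 1), ORing in the dot selected by chOft(3, 5).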
+
+// Unset removes point (x,y)
+func (c Canvas) Unset(x, y int) {
+ i, j := chPos(x, y)
+ ch := c.rawCh(i, j)
+ ch &= ^chOft(x, y)
+ c[[2]int{i, j}] = ch
+}
+
+// Buffer returns un-styled points
+func (c Canvas) Buffer() Buffer {
+ buf := NewBuffer()
+ for k, v := range c {
+ buf.Set(k[0], k[1], Cell{Ch: v + brailleBase})
+ }
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/canvas_test.go b/vendor/github.com/gizak/termui/canvas_test.go
new file mode 100644
index 0000000..f73ce48
--- /dev/null
+++ b/vendor/github.com/gizak/termui/canvas_test.go
@@ -0,0 +1,57 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+// +build ignore
+
+package termui
+
+import (
+ "testing"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+func TestCanvasSet(t *testing.T) {
+ c := NewCanvas()
+ c.Set(0, 0)
+ c.Set(0, 1)
+ c.Set(0, 2)
+ c.Set(0, 3)
+ c.Set(1, 3)
+ c.Set(2, 3)
+ c.Set(3, 3)
+ c.Set(4, 3)
+ c.Set(5, 3)
+ spew.Dump(c)
+}
+
+func TestCanvasUnset(t *testing.T) {
+ c := NewCanvas()
+ c.Set(0, 0)
+ c.Set(0, 1)
+ c.Set(0, 2)
+ c.Unset(0, 2)
+ spew.Dump(c)
+ c.Unset(0, 3)
+ spew.Dump(c)
+}
+
+func TestCanvasBuffer(t *testing.T) {
+ c := NewCanvas()
+ c.Set(0, 0)
+ c.Set(0, 1)
+ c.Set(0, 2)
+ c.Set(0, 3)
+ c.Set(1, 3)
+ c.Set(2, 3)
+ c.Set(3, 3)
+ c.Set(4, 3)
+ c.Set(5, 3)
+ c.Set(6, 3)
+ c.Set(7, 2)
+ c.Set(8, 1)
+ c.Set(9, 0)
+ bufs := c.Buffer()
+ spew.Dump(bufs)
+}
diff --git a/vendor/github.com/gizak/termui/config.py b/vendor/github.com/gizak/termui/config.py
new file mode 100644
index 0000000..30fadcf
--- /dev/null
+++ b/vendor/github.com/gizak/termui/config.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+
+import re
+import os
+import io
+
+copyright = """// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+"""
+
+exclude_dirs = [".git", "_docs"]
+exclude_files = []
+include_dirs = [".", "debug", "extra", "test", "_example"]
+
+
+def is_target(fpath):
+ if os.path.splitext(fpath)[-1] == ".go":
+ return True
+ return False
+
+
+def update_copyright(fpath):
+ print("processing " + fpath)
+ f = io.open(fpath, 'r', encoding='utf-8')
+ fstr = f.read()
+ f.close()
+
+ # remove old
+ m = re.search('^// Copyright .+?\r?\n\r?\n', fstr, re.MULTILINE|re.DOTALL)
+ if m:
+ fstr = fstr[m.end():]
+
+ # add new
+ fstr = copyright + fstr
+ f = io.open(fpath, 'w',encoding='utf-8')
+ f.write(fstr)
+ f.close()
+
+
+def main():
+ for d in include_dirs:
+ files = [
+ os.path.join(d, f) for f in os.listdir(d)
+ if os.path.isfile(os.path.join(d, f))
+ ]
+ for f in files:
+ if is_target(f):
+ update_copyright(f)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/vendor/github.com/gizak/termui/doc.go b/vendor/github.com/gizak/termui/doc.go
new file mode 100644
index 0000000..13924eb
--- /dev/null
+++ b/vendor/github.com/gizak/termui/doc.go
@@ -0,0 +1,29 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+/*
+Package termui is a library designed for creating command line UIs. For more info, go to http://github.com/gizak/termui
+
+A simple example:
+ package main
+
+ import ui "github.com/gizak/termui"
+
+ func main() {
+ if err:=ui.Init(); err != nil {
+ panic(err)
+ }
+ defer ui.Close()
+
+ g := ui.NewGauge()
+ g.Percent = 50
+ g.Width = 50
+ g.BorderLabel = "Gauge"
+
+ ui.Render(g)
+
+ ui.Loop()
+ }
+*/
+package termui
diff --git a/vendor/github.com/gizak/termui/events.go b/vendor/github.com/gizak/termui/events.go
new file mode 100644
index 0000000..16d9bd9
--- /dev/null
+++ b/vendor/github.com/gizak/termui/events.go
@@ -0,0 +1,324 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "path"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/nsf/termbox-go"
+)
+
+type Event struct {
+ Type string
+ Path string
+ From string
+ To string
+ Data interface{}
+ Time int64
+}
+
+var sysEvtChs []chan Event
+
+type EvtKbd struct {
+ KeyStr string
+}
+
+func evtKbd(e termbox.Event) EvtKbd {
+ ek := EvtKbd{}
+
+ k := string(e.Ch)
+ pre := ""
+ mod := ""
+
+ if e.Mod == termbox.ModAlt {
+ mod = "M-"
+ }
+ if e.Ch == 0 {
+ if e.Key > 0xFFFF-12 {
+ k = "<f" + strconv.Itoa(0xFFFF-int(e.Key)+1) + ">"
+ } else if e.Key > 0xFFFF-25 {
+ ks := []string{"<insert>", "<delete>", "<home>", "<end>", "<previous>", "<next>", "<up>", "<down>", "<left>", "<right>"}
+ k = ks[0xFFFF-int(e.Key)-12]
+ }
+
+ if e.Key <= 0x7F {
+ pre = "C-"
+ k = string('a' - 1 + int(e.Key))
+ kmap := map[termbox.Key][2]string{
+ termbox.KeyCtrlSpace: {"C-", "<space>"},
+ termbox.KeyBackspace: {"", "<backspace>"},
+ termbox.KeyTab: {"", "<tab>"},
+ termbox.KeyEnter: {"", "<enter>"},
+ termbox.KeyEsc: {"", "<escape>"},
+ termbox.KeyCtrlBackslash: {"C-", "\\"},
+ termbox.KeyCtrlSlash: {"C-", "/"},
+ termbox.KeySpace: {"", "<space>"},
+ termbox.KeyCtrl8: {"C-", "8"},
+ }
+ if sk, ok := kmap[e.Key]; ok {
+ pre = sk[0]
+ k = sk[1]
+ }
+ }
+ }
+
+ ek.KeyStr = pre + mod + k
+ return ek
+}
+
+func crtTermboxEvt(e termbox.Event) Event {
+ systypemap := map[termbox.EventType]string{
+ termbox.EventKey: "keyboard",
+ termbox.EventResize: "window",
+ termbox.EventMouse: "mouse",
+ termbox.EventError: "error",
+ termbox.EventInterrupt: "interrupt",
+ }
+ ne := Event{From: "/sys", Time: time.Now().Unix()}
+ typ := e.Type
+ ne.Type = systypemap[typ]
+
+ switch typ {
+ case termbox.EventKey:
+ kbd := evtKbd(e)
+ ne.Path = "/sys/kbd/" + kbd.KeyStr
+ ne.Data = kbd
+ case termbox.EventResize:
+ wnd := EvtWnd{}
+ wnd.Width = e.Width
+ wnd.Height = e.Height
+ ne.Path = "/sys/wnd/resize"
+ ne.Data = wnd
+ case termbox.EventError:
+ err := EvtErr(e.Err)
+ ne.Path = "/sys/err"
+ ne.Data = err
+ case termbox.EventMouse:
+ m := EvtMouse{}
+ m.X = e.MouseX
+ m.Y = e.MouseY
+ ne.Path = "/sys/mouse"
+ ne.Data = m
+ }
+ return ne
+}
+
+type EvtWnd struct {
+ Width int
+ Height int
+}
+
+type EvtMouse struct {
+ X int
+ Y int
+ Press string
+}
+
+type EvtErr error
+
+func hookTermboxEvt() {
+ for {
+ e := termbox.PollEvent()
+
+ for _, c := range sysEvtChs {
+ go func(ch chan Event) {
+ ch <- crtTermboxEvt(e)
+ }(c)
+ }
+ }
+}
+
+func NewSysEvtCh() chan Event {
+ ec := make(chan Event)
+ sysEvtChs = append(sysEvtChs, ec)
+ return ec
+}
+
+var DefaultEvtStream = NewEvtStream()
+
+type EvtStream struct {
+ sync.RWMutex
+ srcMap map[string]chan Event
+ stream chan Event
+ wg sync.WaitGroup
+ sigStopLoop chan Event
+ Handlers map[string]func(Event)
+ hook func(Event)
+}
+
+func NewEvtStream() *EvtStream {
+ return &EvtStream{
+ srcMap: make(map[string]chan Event),
+ stream: make(chan Event),
+ Handlers: make(map[string]func(Event)),
+ sigStopLoop: make(chan Event),
+ }
+}
+
+func (es *EvtStream) Init() {
+ es.Merge("internal", es.sigStopLoop)
+ go func() {
+ es.wg.Wait()
+ close(es.stream)
+ }()
+}
+
+func cleanPath(p string) string {
+ if p == "" {
+ return "/"
+ }
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ return path.Clean(p)
+}
+
+func isPathMatch(pattern, path string) bool {
+ if len(pattern) == 0 {
+ return false
+ }
+ n := len(pattern)
+ return len(path) >= n && path[0:n] == pattern
+}
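+
+// For example, isPathMatch("/sys/kbd", "/sys/kbd/q") is true; findMatch below
+// then picks the longest matching registered pattern for an incoming event path.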
+
+func (es *EvtStream) Merge(name string, ec chan Event) {
+ es.Lock()
+ defer es.Unlock()
+
+ es.wg.Add(1)
+ es.srcMap[name] = ec
+
+ go func(a chan Event) {
+ for n := range a {
+ n.From = name
+ es.stream <- n
+ }
+ es.wg.Done()
+ }(ec)
+}
+
+func (es *EvtStream) Handle(path string, handler func(Event)) {
+ es.Handlers[cleanPath(path)] = handler
+}
+
+func findMatch(mux map[string]func(Event), path string) string {
+ n := -1
+ pattern := ""
+ for m := range mux {
+ if !isPathMatch(m, path) {
+ continue
+ }
+ if len(m) > n {
+ pattern = m
+ n = len(m)
+ }
+ }
+ return pattern
+
+}
+
+// ResetHandlers removes all existing defined Handlers from the map.
+func (es *EvtStream) ResetHandlers() {
+ for p := range es.Handlers {
+  delete(es.Handlers, p)
+ }
+}
+
+func (es *EvtStream) match(path string) string {
+ return findMatch(es.Handlers, path)
+}
+
+func (es *EvtStream) Hook(f func(Event)) {
+ es.hook = f
+}
+
+func (es *EvtStream) Loop() {
+ for e := range es.stream {
+ switch e.Path {
+ case "/sig/stoploop":
+ return
+ }
+ go func(a Event) {
+ es.RLock()
+ defer es.RUnlock()
+ if pattern := es.match(a.Path); pattern != "" {
+ es.Handlers[pattern](a)
+ }
+ }(e)
+ if es.hook != nil {
+ es.hook(e)
+ }
+ }
+}
+
+func (es *EvtStream) StopLoop() {
+ go func() {
+ e := Event{
+ Path: "/sig/stoploop",
+ }
+ es.sigStopLoop <- e
+ }()
+}
+
+func Merge(name string, ec chan Event) {
+ DefaultEvtStream.Merge(name, ec)
+}
+
+func Handle(path string, handler func(Event)) {
+ DefaultEvtStream.Handle(path, handler)
+}
+
+func Loop() {
+ DefaultEvtStream.Loop()
+}
+
+func StopLoop() {
+ DefaultEvtStream.StopLoop()
+}
+
+type EvtTimer struct {
+ Duration time.Duration
+ Count uint64
+}
+
+func NewTimerCh(du time.Duration) chan Event {
+ t := make(chan Event)
+
+ go func(a chan Event) {
+ n := uint64(0)
+ for {
+ n++
+ time.Sleep(du)
+ e := Event{}
+ e.Type = "timer"
+ e.Path = "/timer/" + du.String()
+ e.Time = time.Now().Unix()
+ e.Data = EvtTimer{
+ Duration: du,
+ Count: n,
+ }
+ t <- e
+
+ }
+ }(t)
+ return t
+}
+
+var DefaultHandler = func(e Event) {
+}
+
+var usrEvtCh = make(chan Event)
+
+func SendCustomEvt(path string, data interface{}) {
+ e := Event{}
+ e.Path = path
+ e.Data = data
+ e.Time = time.Now().Unix()
+ usrEvtCh <- e
+}
diff --git a/vendor/github.com/gizak/termui/gauge.go b/vendor/github.com/gizak/termui/gauge.go
new file mode 100644
index 0000000..9f6ce3a
--- /dev/null
+++ b/vendor/github.com/gizak/termui/gauge.go
@@ -0,0 +1,109 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "strconv"
+ "strings"
+)
+
+// Gauge is a progress bar like widget.
+// A simple example:
+/*
+ g := termui.NewGauge()
+ g.Percent = 40
+ g.Width = 50
+ g.Height = 3
+ g.BorderLabel = "Slim Gauge"
+ g.BarColor = termui.ColorRed
+ g.PercentColor = termui.ColorBlue
+*/
+
+const ColorUndef Attribute = Attribute(^uint16(0))
+
+type Gauge struct {
+ Block
+ Percent int
+ BarColor Attribute
+ PercentColor Attribute
+ PercentColorHighlighted Attribute
+ Label string
+ LabelAlign Align
+}
+
+// NewGauge returns a new gauge with the current theme.
+func NewGauge() *Gauge {
+ g := &Gauge{
+ Block: *NewBlock(),
+ PercentColor: ThemeAttr("gauge.percent.fg"),
+ BarColor: ThemeAttr("gauge.bar.bg"),
+ Label: "{{percent}}%",
+ LabelAlign: AlignCenter,
+ PercentColorHighlighted: ColorUndef,
+ }
+
+ g.Width = 12
+ g.Height = 5
+ return g
+}
+
+// Buffer implements Bufferer interface.
+func (g *Gauge) Buffer() Buffer {
+ buf := g.Block.Buffer()
+
+ // plot bar
+ w := g.Percent * g.innerArea.Dx() / 100
+ for i := 0; i < g.innerArea.Dy(); i++ {
+ for j := 0; j < w; j++ {
+ c := Cell{}
+ c.Ch = ' '
+ c.Bg = g.BarColor
+ if c.Bg == ColorDefault {
+ c.Bg |= AttrReverse
+ }
+ buf.Set(g.innerArea.Min.X+j, g.innerArea.Min.Y+i, c)
+ }
+ }
+
+ // plot percentage
+ s := strings.Replace(g.Label, "{{percent}}", strconv.Itoa(g.Percent), -1)
+ pry := g.innerArea.Min.Y + g.innerArea.Dy()/2
+ rs := str2runes(s)
+ var pos int
+ switch g.LabelAlign {
+ case AlignLeft:
+ pos = 0
+
+ case AlignCenter:
+ pos = (g.innerArea.Dx() - strWidth(s)) / 2
+
+ case AlignRight:
+ pos = g.innerArea.Dx() - strWidth(s) - 1
+ }
+ pos += g.innerArea.Min.X
+
+ for i, v := range rs {
+ c := Cell{
+ Ch: v,
+ Fg: g.PercentColor,
+ }
+
+ if w+g.innerArea.Min.X > pos+i {
+ c.Bg = g.BarColor
+ if c.Bg == ColorDefault {
+ c.Bg |= AttrReverse
+ }
+
+ if g.PercentColorHighlighted != ColorUndef {
+ c.Fg = g.PercentColorHighlighted
+ }
+ } else {
+ c.Bg = g.Block.Bg
+ }
+
+ buf.Set(1+pos+i, pry, c)
+ }
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/glide.lock b/vendor/github.com/gizak/termui/glide.lock
new file mode 100644
index 0000000..be5952d
--- /dev/null
+++ b/vendor/github.com/gizak/termui/glide.lock
@@ -0,0 +1,30 @@
+hash: 7a754ba100256404a978b2fc8738aee337beb822458e4b6060399fb89ebd215c
+updated: 2016-11-03T17:39:24.323773674-04:00
+imports:
+- name: github.com/maruel/panicparse
+ version: ad661195ed0e88491e0f14be6613304e3b1141d6
+ subpackages:
+ - stack
+- name: github.com/mattn/go-runewidth
+ version: 737072b4e32b7a5018b4a7125da8d12de90e8045
+- name: github.com/mitchellh/go-wordwrap
+ version: ad45545899c7b13c020ea92b2072220eefad42b8
+- name: github.com/nsf/termbox-go
+ version: b6acae516ace002cb8105a89024544a1480655a5
+- name: golang.org/x/net
+ version: 569280fa63be4e201b975e5411e30a92178f0118
+ subpackages:
+ - websocket
+testImports:
+- name: github.com/davecgh/go-spew
+ version: 346938d642f2ec3594ed81d874461961cd0faa76
+ subpackages:
+ - spew
+- name: github.com/pmezard/go-difflib
+ version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
+ subpackages:
+ - difflib
+- name: github.com/stretchr/testify
+ version: 976c720a22c8eb4eb6a0b4348ad85ad12491a506
+ subpackages:
+ - assert
diff --git a/vendor/github.com/gizak/termui/glide.yaml b/vendor/github.com/gizak/termui/glide.yaml
new file mode 100644
index 0000000..a681231
--- /dev/null
+++ b/vendor/github.com/gizak/termui/glide.yaml
@@ -0,0 +1,9 @@
+package: github.com/gizak/termui
+import:
+- package: github.com/mattn/go-runewidth
+- package: github.com/mitchellh/go-wordwrap
+- package: github.com/nsf/termbox-go
+- package: golang.org/x/net
+ subpackages:
+ - websocket
+- package: github.com/maruel/panicparse
diff --git a/vendor/github.com/gizak/termui/grid.go b/vendor/github.com/gizak/termui/grid.go
new file mode 100644
index 0000000..a950232
--- /dev/null
+++ b/vendor/github.com/gizak/termui/grid.go
@@ -0,0 +1,279 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+// GridBufferer introduces a Bufferer that can be manipulated by Grid.
+type GridBufferer interface {
+ Bufferer
+ GetHeight() int
+ SetWidth(int)
+ SetX(int)
+ SetY(int)
+}
+
+// Row builds a layout tree
+type Row struct {
+ Cols []*Row //children
+ Widget GridBufferer // root
+ X int
+ Y int
+ Width int
+ Height int
+ Span int
+ Offset int
+}
+
+// calculate and set the underlying layout tree's x, y, height and width.
+func (r *Row) calcLayout() {
+ r.assignWidth(r.Width)
+ r.Height = r.solveHeight()
+ r.assignX(r.X)
+ r.assignY(r.Y)
+}
+
+// tell if the node is a leaf in the tree.
+func (r *Row) isLeaf() bool {
+ return r.Cols == nil || len(r.Cols) == 0
+}
+
+func (r *Row) isRenderableLeaf() bool {
+ return r.isLeaf() && r.Widget != nil
+}
+
+// assign widgets' (and their parent rows') width recursively.
+func (r *Row) assignWidth(w int) {
+ r.SetWidth(w)
+
+ accW := 0 // acc span and offset
+ calcW := make([]int, len(r.Cols)) // calculated width
+ calcOftX := make([]int, len(r.Cols)) // computed start position of x
+
+ for i, c := range r.Cols {
+ accW += c.Span + c.Offset
+ cw := int(float64(c.Span*r.Width) / 12.0)
+
+ if i >= 1 {
+ calcOftX[i] = calcOftX[i-1] +
+ calcW[i-1] +
+ int(float64(r.Cols[i-1].Offset*r.Width)/12.0)
+ }
+
+ // use up the space if it is the last col
+ if i == len(r.Cols)-1 && accW == 12 {
+ cw = r.Width - calcOftX[i]
+ }
+ calcW[i] = cw
+ r.Cols[i].assignWidth(cw)
+ }
+}
+
+// bottom up calc and set rows' (and their widgets') height,
+// return r's total height.
+func (r *Row) solveHeight() int {
+ if r.isRenderableLeaf() {
+ r.Height = r.Widget.GetHeight()
+ return r.Widget.GetHeight()
+ }
+
+ maxh := 0
+ if !r.isLeaf() {
+ for _, c := range r.Cols {
+ nh := c.solveHeight()
+ // when embed rows in Cols, row widgets stack up
+ if r.Widget != nil {
+ nh += r.Widget.GetHeight()
+ }
+ if nh > maxh {
+ maxh = nh
+ }
+ }
+ }
+
+ r.Height = maxh
+ return maxh
+}
+
+// recursively assign x position for r tree.
+func (r *Row) assignX(x int) {
+ r.SetX(x)
+
+ if !r.isLeaf() {
+ acc := 0
+ for i, c := range r.Cols {
+ if c.Offset != 0 {
+ acc += int(float64(c.Offset*r.Width) / 12.0)
+ }
+ r.Cols[i].assignX(x + acc)
+ acc += c.Width
+ }
+ }
+}
+
+// recursively assign y position to r.
+func (r *Row) assignY(y int) {
+ r.SetY(y)
+
+ if r.isLeaf() {
+ return
+ }
+
+ for i := range r.Cols {
+ acc := 0
+ if r.Widget != nil {
+ acc = r.Widget.GetHeight()
+ }
+ r.Cols[i].assignY(y + acc)
+ }
+
+}
+
+// GetHeight implements GridBufferer interface.
+func (r Row) GetHeight() int {
+ return r.Height
+}
+
+// SetX implements GridBufferer interface.
+func (r *Row) SetX(x int) {
+ r.X = x
+ if r.Widget != nil {
+ r.Widget.SetX(x)
+ }
+}
+
+// SetY implements GridBufferer interface.
+func (r *Row) SetY(y int) {
+ r.Y = y
+ if r.Widget != nil {
+ r.Widget.SetY(y)
+ }
+}
+
+// SetWidth implements GridBufferer interface.
+func (r *Row) SetWidth(w int) {
+ r.Width = w
+ if r.Widget != nil {
+ r.Widget.SetWidth(w)
+ }
+}
+
+// Buffer implements Bufferer interface,
+// recursively merge all widgets buffer
+func (r *Row) Buffer() Buffer {
+ merged := NewBuffer()
+
+ if r.isRenderableLeaf() {
+ return r.Widget.Buffer()
+ }
+
+ // for those are not leaves but have a renderable widget
+ if r.Widget != nil {
+ merged.Merge(r.Widget.Buffer())
+ }
+
+ // collect buffer from children
+ if !r.isLeaf() {
+ for _, c := range r.Cols {
+ merged.Merge(c.Buffer())
+ }
+ }
+
+ return merged
+}
+
+// Grid implements a 12-column system.
+// A simple example:
+/*
+ import ui "github.com/gizak/termui"
+ // init and create widgets...
+
+ // build
+ ui.Body.AddRows(
+ ui.NewRow(
+ ui.NewCol(6, 0, widget0),
+ ui.NewCol(6, 0, widget1)),
+ ui.NewRow(
+ ui.NewCol(3, 0, widget2),
+ ui.NewCol(3, 0, widget30, widget31, widget32),
+ ui.NewCol(6, 0, widget4)))
+
+ // calculate layout
+ ui.Body.Align()
+
+ ui.Render(ui.Body)
+*/
+type Grid struct {
+ Rows []*Row
+ Width int
+ X int
+ Y int
+ BgColor Attribute
+}
+
+// NewGrid returns *Grid with given rows.
+func NewGrid(rows ...*Row) *Grid {
+ return &Grid{Rows: rows}
+}
+
+// AddRows appends given rows to Grid.
+func (g *Grid) AddRows(rs ...*Row) {
+ g.Rows = append(g.Rows, rs...)
+}
+
+// NewRow creates a new row out of given columns.
+func NewRow(cols ...*Row) *Row {
+ rs := &Row{Span: 12, Cols: cols}
+ return rs
+}
+
+// NewCol accepts either GridBufferer widgets or a single *Row built with NewRow.
+// Note that if multiple widgets are provided, they will stack up in the col.
+func NewCol(span, offset int, widgets ...GridBufferer) *Row {
+ r := &Row{Span: span, Offset: offset}
+
+ if widgets != nil && len(widgets) == 1 {
+ wgt := widgets[0]
+ nw, isRow := wgt.(*Row)
+ if isRow {
+ r.Cols = nw.Cols
+ } else {
+ r.Widget = wgt
+ }
+ return r
+ }
+
+ r.Cols = []*Row{}
+ ir := r
+ for _, w := range widgets {
+ nr := &Row{Span: 12, Widget: w}
+ ir.Cols = []*Row{nr}
+ ir = nr
+ }
+
+ return r
+}
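+
+// Usage sketch (illustration only; gauge0, gauge1 and gauge2 are hypothetical
+// widgets): a single widget occupies the column, while several widgets stack up
+// inside it.
+/*
+ col := termui.NewCol(6, 0, gauge0)             // one widget spanning 6 of 12 columns
+ stacked := termui.NewCol(6, 0, gauge1, gauge2) // gauge1 and gauge2 stack vertically
+*/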
+
+// Align calculates each row's layout.
+func (g *Grid) Align() {
+ h := 0
+ for _, r := range g.Rows {
+ r.SetWidth(g.Width)
+ r.SetX(g.X)
+ r.SetY(g.Y + h)
+ r.calcLayout()
+ h += r.GetHeight()
+ }
+}
+
+// Buffer implements Bufferer interface.
+func (g Grid) Buffer() Buffer {
+ buf := NewBuffer()
+
+ for _, r := range g.Rows {
+ buf.Merge(r.Buffer())
+ }
+ return buf
+}
+
+var Body *Grid
diff --git a/vendor/github.com/gizak/termui/helper.go b/vendor/github.com/gizak/termui/helper.go
new file mode 100644
index 0000000..18a6770
--- /dev/null
+++ b/vendor/github.com/gizak/termui/helper.go
@@ -0,0 +1,222 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "regexp"
+ "strings"
+
+ tm "github.com/nsf/termbox-go"
+)
+import rw "github.com/mattn/go-runewidth"
+
+/* ---------------Port from termbox-go --------------------- */
+
+// Attribute is printable cell's color and style.
+type Attribute uint16
+
+// 8 basic colors
+const (
+ ColorDefault Attribute = iota
+ ColorBlack
+ ColorRed
+ ColorGreen
+ ColorYellow
+ ColorBlue
+ ColorMagenta
+ ColorCyan
+ ColorWhite
+)
+
+// NumberofColors is the number of basic colors.
+const NumberofColors = 8
+
+// Text style
+const (
+ AttrBold Attribute = 1 << (iota + 9)
+ AttrUnderline
+ AttrReverse
+)
+
+var (
+ dot = "…"
+ dotw = rw.StringWidth(dot)
+)
+
+/* ----------------------- End ----------------------------- */
+
+func toTmAttr(x Attribute) tm.Attribute {
+ return tm.Attribute(x)
+}
+
+func str2runes(s string) []rune {
+ return []rune(s)
+}
+
+// Here for backwards-compatibility.
+func trimStr2Runes(s string, w int) []rune {
+ return TrimStr2Runes(s, w)
+}
+
+// TrimStr2Runes trims the string to width w (truncating and appending …) and
+// returns the resulting runes if the string is wider than w. If the string
+// already fits within w, its runes are returned unchanged.
+func TrimStr2Runes(s string, w int) []rune {
+ if w <= 0 {
+ return []rune{}
+ }
+
+ sw := rw.StringWidth(s)
+ if sw > w {
+ return []rune(rw.Truncate(s, w, dot))
+ }
+ return str2runes(s)
+}
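+
+// Illustrative sketch (not part of the original source), assuming single-width runes:
+/*
+ termui.TrimStr2Runes("dashboard", 6) // -> []rune("dasho…"), width 6 including the ellipsis
+ termui.TrimStr2Runes("dash", 6)      // -> []rune("dash"), already fits
+*/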
+
+// TrimStrIfAppropriate truncates the string to width w, appending …, if the
+// string is wider than w; otherwise it returns the string unchanged.
+func TrimStrIfAppropriate(s string, w int) string {
+ if w <= 0 {
+ return ""
+ }
+
+ sw := rw.StringWidth(s)
+ if sw > w {
+ return rw.Truncate(s, w, dot)
+ }
+
+ return s
+}
+
+func strWidth(s string) int {
+ return rw.StringWidth(s)
+}
+
+func charWidth(ch rune) int {
+ return rw.RuneWidth(ch)
+}
+
+var whiteSpaceRegex = regexp.MustCompile(`\s`)
+
+// StringToAttribute converts text to a termui attribute. You may specify more
+// than one attribute like this: "BLACK, BOLD, ...". All whitespace
+// is ignored.
+func StringToAttribute(text string) Attribute {
+ text = whiteSpaceRegex.ReplaceAllString(strings.ToLower(text), "")
+ attributes := strings.Split(text, ",")
+ result := Attribute(0)
+
+ for _, theAttribute := range attributes {
+ var match Attribute
+ switch theAttribute {
+ case "reset", "default":
+ match = ColorDefault
+
+ case "black":
+ match = ColorBlack
+
+ case "red":
+ match = ColorRed
+
+ case "green":
+ match = ColorGreen
+
+ case "yellow":
+ match = ColorYellow
+
+ case "blue":
+ match = ColorBlue
+
+ case "magenta":
+ match = ColorMagenta
+
+ case "cyan":
+ match = ColorCyan
+
+ case "white":
+ match = ColorWhite
+
+ case "bold":
+ match = AttrBold
+
+ case "underline":
+ match = AttrUnderline
+
+ case "reverse":
+ match = AttrReverse
+ }
+
+ result |= match
+ }
+
+ return result
+}
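+
+// Usage sketch (illustration only): attribute names are case-insensitive and
+// may be combined with commas.
+/*
+ attr := termui.StringToAttribute("RED, BOLD") // == termui.ColorRed | termui.AttrBold
+*/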
+
+// TextCells builds a []Cell of coloured text cells from s.
+func TextCells(s string, fg, bg Attribute) []Cell {
+ cs := make([]Cell, 0, len(s))
+
+ // sequence := MarkdownTextRendererFactory{}.TextRenderer(s).Render(fg, bg)
+ // runes := []rune(sequence.NormalizedText)
+ runes := str2runes(s)
+
+ for n := range runes {
+ // point, _ := sequence.PointAt(n, 0, 0)
+ // cs = append(cs, Cell{point.Ch, point.Fg, point.Bg})
+ cs = append(cs, Cell{runes[n], fg, bg})
+ }
+ return cs
+}
+
+// Width returns the actual screen space the cell takes (usually 1 or 2).
+func (c Cell) Width() int {
+ return charWidth(c.Ch)
+}
+
+// Copy returns a copy of c.
+func (c Cell) Copy() Cell {
+ return c
+}
+
+// TrimTxCells trims the overflowed text cells sequence.
+func TrimTxCells(cs []Cell, w int) []Cell {
+ if len(cs) <= w {
+ return cs
+ }
+ return cs[:w]
+}
+
+// DTrimTxCls trims the overflowed text cells sequence and appends an ellipsis at the end.
+func DTrimTxCls(cs []Cell, w int) []Cell {
+ l := len(cs)
+ if l <= 0 {
+ return []Cell{}
+ }
+
+ rt := make([]Cell, 0, w)
+ csw := 0
+ for i := 0; i < l && csw <= w; i++ {
+ c := cs[i]
+ cw := c.Width()
+
+ if cw+csw < w {
+ rt = append(rt, c)
+ csw += cw
+ } else {
+ rt = append(rt, Cell{'…', c.Fg, c.Bg})
+ break
+ }
+ }
+
+ return rt
+}
+
+func CellsToStr(cs []Cell) string {
+ str := ""
+ for _, c := range cs {
+ str += string(c.Ch)
+ }
+ return str
+}
diff --git a/vendor/github.com/gizak/termui/linechart.go b/vendor/github.com/gizak/termui/linechart.go
new file mode 100644
index 0000000..f7eea28
--- /dev/null
+++ b/vendor/github.com/gizak/termui/linechart.go
@@ -0,0 +1,331 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "fmt"
+ "math"
+)
+
+// only 16 possible combinations, why bother
+var braillePatterns = map[[2]int]rune{
+ [2]int{0, 0}: '⣀',
+ [2]int{0, 1}: '⡠',
+ [2]int{0, 2}: '⡐',
+ [2]int{0, 3}: '⡈',
+
+ [2]int{1, 0}: '⢄',
+ [2]int{1, 1}: '⠤',
+ [2]int{1, 2}: '⠔',
+ [2]int{1, 3}: '⠌',
+
+ [2]int{2, 0}: '⢂',
+ [2]int{2, 1}: '⠢',
+ [2]int{2, 2}: '⠒',
+ [2]int{2, 3}: '⠊',
+
+ [2]int{3, 0}: '⢁',
+ [2]int{3, 1}: '⠡',
+ [2]int{3, 2}: '⠑',
+ [2]int{3, 3}: '⠉',
+}
+
+var lSingleBraille = [4]rune{'\u2840', '⠄', '⠂', '⠁'}
+var rSingleBraille = [4]rune{'\u2880', '⠠', '⠐', '⠈'}
+
+// LineChart has two modes: braille (default) and dot. Braille mode gives 2x the capacity of dot mode,
+// because one braille char can represent two data points.
+/*
+ lc := termui.NewLineChart()
+ lc.BorderLabel = "braille-mode Line Chart"
+ lc.Data = [1.2, 1.3, 1.5, 1.7, 1.5, 1.6, 1.8, 2.0]
+ lc.Width = 50
+ lc.Height = 12
+ lc.AxesColor = termui.ColorWhite
+ lc.LineColor = termui.ColorGreen | termui.AttrBold
+ // termui.Render(lc)...
+*/
+type LineChart struct {
+ Block
+ Data []float64
+ DataLabels []string // if unset, the data indices will be used
+ Mode string // braille | dot
+ DotStyle rune
+ LineColor Attribute
+ scale float64 // data span per cell on y-axis
+ AxesColor Attribute
+ drawingX int
+ drawingY int
+ axisYHeight int
+ axisXWidth int
+ axisYLabelGap int
+ axisXLabelGap int
+ topValue float64
+ bottomValue float64
+ labelX [][]rune
+ labelY [][]rune
+ labelYSpace int
+ maxY float64
+ minY float64
+ autoLabels bool
+}
+
+// NewLineChart returns a new LineChart with current theme.
+func NewLineChart() *LineChart {
+ lc := &LineChart{Block: *NewBlock()}
+ lc.AxesColor = ThemeAttr("linechart.axes.fg")
+ lc.LineColor = ThemeAttr("linechart.line.fg")
+ lc.Mode = "braille"
+ lc.DotStyle = '•'
+ lc.axisXLabelGap = 2
+ lc.axisYLabelGap = 1
+ lc.bottomValue = math.Inf(1)
+ lc.topValue = math.Inf(-1)
+ return lc
+}
+
+// one cell contains two data points,
+// so the capacity is 2x that of dot mode
+func (lc *LineChart) renderBraille() Buffer {
+ buf := NewBuffer()
+
+ // return: b -> which cell should the point be in
+ // m -> in the cell, divided into 4 equal height levels, which subcell?
+ getPos := func(d float64) (b, m int) {
+ cnt4 := int((d-lc.bottomValue)/(lc.scale/4) + 0.5)
+ b = cnt4 / 4
+ m = cnt4 % 4
+ return
+ }
+ // plot points
+ for i := 0; 2*i+1 < len(lc.Data) && i < lc.axisXWidth; i++ {
+ b0, m0 := getPos(lc.Data[2*i])
+ b1, m1 := getPos(lc.Data[2*i+1])
+
+ if b0 == b1 {
+ c := Cell{
+ Ch: braillePatterns[[2]int{m0, m1}],
+ Bg: lc.Bg,
+ Fg: lc.LineColor,
+ }
+ y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b0
+ x := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
+ buf.Set(x, y, c)
+ } else {
+ c0 := Cell{Ch: lSingleBraille[m0],
+ Fg: lc.LineColor,
+ Bg: lc.Bg}
+ x0 := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
+ y0 := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b0
+ buf.Set(x0, y0, c0)
+
+ c1 := Cell{Ch: rSingleBraille[m1],
+ Fg: lc.LineColor,
+ Bg: lc.Bg}
+ x1 := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
+ y1 := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b1
+ buf.Set(x1, y1, c1)
+ }
+
+ }
+ return buf
+}
+
+func (lc *LineChart) renderDot() Buffer {
+ buf := NewBuffer()
+ for i := 0; i < len(lc.Data) && i < lc.axisXWidth; i++ {
+ c := Cell{
+ Ch: lc.DotStyle,
+ Fg: lc.LineColor,
+ Bg: lc.Bg,
+ }
+ x := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
+ y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - int((lc.Data[i]-lc.bottomValue)/lc.scale+0.5)
+ buf.Set(x, y, c)
+ }
+
+ return buf
+}
+
+func (lc *LineChart) calcLabelX() {
+ lc.labelX = [][]rune{}
+
+ for i, l := 0, 0; i < len(lc.DataLabels) && l < lc.axisXWidth; i++ {
+ if lc.Mode == "dot" {
+ if l >= len(lc.DataLabels) {
+ break
+ }
+
+ s := str2runes(lc.DataLabels[l])
+ w := strWidth(lc.DataLabels[l])
+ if l+w <= lc.axisXWidth {
+ lc.labelX = append(lc.labelX, s)
+ }
+ l += w + lc.axisXLabelGap
+ } else { // braille
+ if 2*l >= len(lc.DataLabels) {
+ break
+ }
+
+ s := str2runes(lc.DataLabels[2*l])
+ w := strWidth(lc.DataLabels[2*l])
+ if l+w <= lc.axisXWidth {
+ lc.labelX = append(lc.labelX, s)
+ }
+ l += w + lc.axisXLabelGap
+
+ }
+ }
+}
+
+func shortenFloatVal(x float64) string {
+ s := fmt.Sprintf("%.2f", x)
+ if len(s)-3 > 3 {
+ s = fmt.Sprintf("%.2e", x)
+ }
+
+ if x < 0 {
+ s = fmt.Sprintf("%.2f", x)
+ }
+ return s
+}
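+
+// Behaviour sketch (illustration only): long positive values switch to scientific
+// notation, while short and negative values keep the fixed format.
+/*
+ shortenFloatVal(3.14159)    // "3.14"
+ shortenFloatVal(12345.678)  // "1.23e+04"
+ shortenFloatVal(-12345.678) // "-12345.68"
+*/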
+
+func (lc *LineChart) calcLabelY() {
+ span := lc.topValue - lc.bottomValue
+ lc.scale = span / float64(lc.axisYHeight)
+
+ n := (1 + lc.axisYHeight) / (lc.axisYLabelGap + 1)
+ lc.labelY = make([][]rune, n)
+ maxLen := 0
+ for i := 0; i < n; i++ {
+ s := str2runes(shortenFloatVal(lc.bottomValue + float64(i)*span/float64(n)))
+ if len(s) > maxLen {
+ maxLen = len(s)
+ }
+ lc.labelY[i] = s
+ }
+
+ lc.labelYSpace = maxLen
+}
+
+func (lc *LineChart) calcLayout() {
+ // set datalabels if it is not provided
+ if (lc.DataLabels == nil || len(lc.DataLabels) == 0) || lc.autoLabels {
+ lc.autoLabels = true
+ lc.DataLabels = make([]string, len(lc.Data))
+ for i := range lc.Data {
+ lc.DataLabels[i] = fmt.Sprint(i)
+ }
+ }
+
+ // lazily expand the y bounds to avoid frequent shaking:
+ // only update a bound when the drawing is about to overflow it
+ lc.minY = lc.Data[0]
+ lc.maxY = lc.Data[0]
+
+ // valid visible range
+ vrange := lc.innerArea.Dx()
+ if lc.Mode == "braille" {
+ vrange = 2 * lc.innerArea.Dx()
+ }
+ if vrange > len(lc.Data) {
+ vrange = len(lc.Data)
+ }
+
+ for _, v := range lc.Data[:vrange] {
+ if v > lc.maxY {
+ lc.maxY = v
+ }
+ if v < lc.minY {
+ lc.minY = v
+ }
+ }
+
+ span := lc.maxY - lc.minY
+
+ if lc.minY < lc.bottomValue {
+ lc.bottomValue = lc.minY - 0.2*span
+ }
+
+ if lc.maxY > lc.topValue {
+ lc.topValue = lc.maxY + 0.2*span
+ }
+
+ lc.axisYHeight = lc.innerArea.Dy() - 2
+ lc.calcLabelY()
+
+ lc.axisXWidth = lc.innerArea.Dx() - 1 - lc.labelYSpace
+ lc.calcLabelX()
+
+ lc.drawingX = lc.innerArea.Min.X + 1 + lc.labelYSpace
+ lc.drawingY = lc.innerArea.Min.Y
+}
+
+func (lc *LineChart) plotAxes() Buffer {
+ buf := NewBuffer()
+
+ origY := lc.innerArea.Min.Y + lc.innerArea.Dy() - 2
+ origX := lc.innerArea.Min.X + lc.labelYSpace
+
+ buf.Set(origX, origY, Cell{Ch: ORIGIN, Fg: lc.AxesColor, Bg: lc.Bg})
+
+ for x := origX + 1; x < origX+lc.axisXWidth; x++ {
+ buf.Set(x, origY, Cell{Ch: HDASH, Fg: lc.AxesColor, Bg: lc.Bg})
+ }
+
+ for dy := 1; dy <= lc.axisYHeight; dy++ {
+ buf.Set(origX, origY-dy, Cell{Ch: VDASH, Fg: lc.AxesColor, Bg: lc.Bg})
+ }
+
+ // x label
+ oft := 0
+ for _, rs := range lc.labelX {
+ if oft+len(rs) > lc.axisXWidth {
+ break
+ }
+ for j, r := range rs {
+ c := Cell{
+ Ch: r,
+ Fg: lc.AxesColor,
+ Bg: lc.Bg,
+ }
+ x := origX + oft + j
+ y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 1
+ buf.Set(x, y, c)
+ }
+ oft += len(rs) + lc.axisXLabelGap
+ }
+
+ // y labels
+ for i, rs := range lc.labelY {
+ for j, r := range rs {
+ buf.Set(
+ lc.innerArea.Min.X+j,
+ origY-i*(lc.axisYLabelGap+1),
+ Cell{Ch: r, Fg: lc.AxesColor, Bg: lc.Bg})
+ }
+ }
+
+ return buf
+}
+
+// Buffer implements Bufferer interface.
+func (lc *LineChart) Buffer() Buffer {
+ buf := lc.Block.Buffer()
+
+ if lc.Data == nil || len(lc.Data) == 0 {
+ return buf
+ }
+ lc.calcLayout()
+ buf.Merge(lc.plotAxes())
+
+ if lc.Mode == "dot" {
+ buf.Merge(lc.renderDot())
+ } else {
+ buf.Merge(lc.renderBraille())
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/linechart_others.go b/vendor/github.com/gizak/termui/linechart_others.go
new file mode 100644
index 0000000..14897ea
--- /dev/null
+++ b/vendor/github.com/gizak/termui/linechart_others.go
@@ -0,0 +1,11 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+// +build !windows
+
+package termui
+
+const VDASH = '┊'
+const HDASH = '┈'
+const ORIGIN = '└'
diff --git a/vendor/github.com/gizak/termui/linechart_windows.go b/vendor/github.com/gizak/termui/linechart_windows.go
new file mode 100644
index 0000000..994d3e5
--- /dev/null
+++ b/vendor/github.com/gizak/termui/linechart_windows.go
@@ -0,0 +1,11 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+// +build windows
+
+package termui
+
+const VDASH = '|'
+const HDASH = '-'
+const ORIGIN = '+'
diff --git a/vendor/github.com/gizak/termui/list.go b/vendor/github.com/gizak/termui/list.go
new file mode 100644
index 0000000..ea6635e
--- /dev/null
+++ b/vendor/github.com/gizak/termui/list.go
@@ -0,0 +1,89 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import "strings"
+
+// List displays []string as its items,
+// it has a Overflow option (default is "hidden"), when set to "hidden",
+// the item exceeding List's width is truncated, but when set to "wrap",
+// the overflowed text breaks into next line.
+/*
+ strs := []string{
+ "[0] github.com/gizak/termui",
+ "[1] editbox.go",
+ "[2] iterrupt.go",
+ "[3] keyboard.go",
+ "[4] output.go",
+ "[5] random_out.go",
+ "[6] dashboard.go",
+ "[7] nsf/termbox-go"}
+
+ ls := termui.NewList()
+ ls.Items = strs
+ ls.ItemFgColor = termui.ColorYellow
+ ls.BorderLabel = "List"
+ ls.Height = 7
+ ls.Width = 25
+ ls.Y = 0
+*/
+type List struct {
+ Block
+ Items []string
+ Overflow string
+ ItemFgColor Attribute
+ ItemBgColor Attribute
+}
+
+// NewList returns a new *List with current theme.
+func NewList() *List {
+ l := &List{Block: *NewBlock()}
+ l.Overflow = "hidden"
+ l.ItemFgColor = ThemeAttr("list.item.fg")
+ l.ItemBgColor = ThemeAttr("list.item.bg")
+ return l
+}
+
+// Buffer implements Bufferer interface.
+func (l *List) Buffer() Buffer {
+ buf := l.Block.Buffer()
+
+ switch l.Overflow {
+ case "wrap":
+ cs := DefaultTxBuilder.Build(strings.Join(l.Items, "\n"), l.ItemFgColor, l.ItemBgColor)
+ i, j, k := 0, 0, 0
+ for i < l.innerArea.Dy() && k < len(cs) {
+ w := cs[k].Width()
+ if cs[k].Ch == '\n' || j+w > l.innerArea.Dx() {
+ i++
+ j = 0
+ if cs[k].Ch == '\n' {
+ k++
+ }
+ continue
+ }
+ buf.Set(l.innerArea.Min.X+j, l.innerArea.Min.Y+i, cs[k])
+
+ k++
+ j++
+ }
+
+ case "hidden":
+ trimItems := l.Items
+ if len(trimItems) > l.innerArea.Dy() {
+ trimItems = trimItems[:l.innerArea.Dy()]
+ }
+ for i, v := range trimItems {
+ cs := DTrimTxCls(DefaultTxBuilder.Build(v, l.ItemFgColor, l.ItemBgColor), l.innerArea.Dx())
+ j := 0
+ for _, vv := range cs {
+ w := vv.Width()
+ buf.Set(l.innerArea.Min.X+j, l.innerArea.Min.Y+i, vv)
+ j += w
+ }
+ }
+ }
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/mbarchart.go b/vendor/github.com/gizak/termui/mbarchart.go
new file mode 100644
index 0000000..0f91e97
--- /dev/null
+++ b/vendor/github.com/gizak/termui/mbarchart.go
@@ -0,0 +1,242 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "fmt"
+)
+
+// This is the implementation of a multi-colored (stacked) bar graph. It is different from the default bar graph, which is implemented in bar.go.
+// Multi-Colored-BarChart creates multiple bars in a widget:
+/*
+ bc := termui.NewMBarChart()
+ bc.Data[0] = []int{3, 2, 5, 7, 9, 4}
+ bc.Data[1] = []int{7, 8, 5, 3, 1, 6}
+ bclabels := []string{"S0", "S1", "S2", "S3", "S4", "S5"}
+ bc.BorderLabel = "Bar Chart"
+ bc.Width = 26
+ bc.Height = 10
+ bc.DataLabels = bclabels
+ bc.TextColor = termui.ColorGreen
+ bc.BarColor[0] = termui.ColorRed
+ bc.NumColor[0] = termui.ColorYellow
+*/
+type MBarChart struct {
+ Block
+ BarColor [NumberofColors]Attribute
+ TextColor Attribute
+ NumColor [NumberofColors]Attribute
+ Data [NumberofColors][]int
+ DataLabels []string
+ BarWidth int
+ BarGap int
+ labels [][]rune
+ dataNum [NumberofColors][][]rune
+ numBar int
+ scale float64
+ max int
+ minDataLen int
+ numStack int
+ ShowScale bool
+ maxScale []rune
+}
+
+// NewMBarChart returns a new *MBarChart with the current theme.
+func NewMBarChart() *MBarChart {
+ bc := &MBarChart{Block: *NewBlock()}
+ bc.BarColor[0] = ThemeAttr("mbarchart.bar.bg")
+ bc.NumColor[0] = ThemeAttr("mbarchart.num.fg")
+ bc.TextColor = ThemeAttr("mbarchart.text.fg")
+ bc.BarGap = 1
+ bc.BarWidth = 3
+ return bc
+}
+
+func (bc *MBarChart) layout() {
+ bc.numBar = bc.innerArea.Dx() / (bc.BarGap + bc.BarWidth)
+ bc.labels = make([][]rune, bc.numBar)
+ DataLen := 0
+ LabelLen := len(bc.DataLabels)
+ bc.minDataLen = 9999 // Set this to a very high value so we can find the minimum; we need to know which array in Data[][] has the least length
+
+ // We need to know how many stacked data arrays (Data[0], Data[1], ...) there are
+ for i := 0; i < len(bc.Data); i++ {
+ if bc.Data[i] == nil {
+ break
+ }
+ DataLen++
+ }
+ bc.numStack = DataLen
+
+ // We need to know the minimum length of the data arrays: Data[0] could have 10 elements while Data[1] has only 5, in which case we plot only 5 bars
+
+ for i := 0; i < DataLen; i++ {
+ if bc.minDataLen > len(bc.Data[i]) {
+ bc.minDataLen = len(bc.Data[i])
+ }
+ }
+
+ if LabelLen > bc.minDataLen {
+ LabelLen = bc.minDataLen
+ }
+
+ for i := 0; i < LabelLen && i < bc.numBar; i++ {
+ bc.labels[i] = trimStr2Runes(bc.DataLabels[i], bc.BarWidth)
+ }
+
+ for i := 0; i < bc.numStack; i++ {
+ bc.dataNum[i] = make([][]rune, len(bc.Data[i]))
+ // For each stack of the bar, calculate the runes
+ for j := 0; j < LabelLen && i < bc.numBar; j++ {
+ n := bc.Data[i][j]
+ s := fmt.Sprint(n)
+ bc.dataNum[i][j] = trimStr2Runes(s, bc.BarWidth)
+ }
+ // If no color is defined, populate one that differs from the previous bar's
+ if bc.BarColor[i] == ColorDefault && bc.NumColor[i] == ColorDefault {
+ if i == 0 {
+ bc.BarColor[i] = ColorBlack
+ } else {
+ bc.BarColor[i] = bc.BarColor[i-1] + 1
+ if bc.BarColor[i] > NumberofColors {
+ bc.BarColor[i] = ColorBlack
+ }
+ }
+ bc.NumColor[i] = (NumberofColors + 1) - bc.BarColor[i] //Make NumColor opposite of barColor for visibility
+ }
+ }
+
+ // If the max value is not set, compute it as max(sum(d1[0],d2[0],d3[0]), ..., sum(d1[n],d2[n],d3[n]))
+
+ if bc.max == 0 {
+ bc.max = -1
+ }
+ for i := 0; i < bc.minDataLen && i < LabelLen; i++ {
+ var dsum int
+ for j := 0; j < bc.numStack; j++ {
+ dsum += bc.Data[j][i]
+ }
+ if dsum > bc.max {
+ bc.max = dsum
+ }
+ }
+
+ // Finally, calculate the max scale
+ if bc.ShowScale {
+ s := fmt.Sprintf("%d", bc.max)
+ bc.maxScale = trimStr2Runes(s, len(s))
+ bc.scale = float64(bc.max) / float64(bc.innerArea.Dy()-2)
+ } else {
+ bc.scale = float64(bc.max) / float64(bc.innerArea.Dy()-1)
+ }
+
+}
+
+func (bc *MBarChart) SetMax(max int) {
+
+ if max > 0 {
+ bc.max = max
+ }
+}
+
+// Buffer implements Bufferer interface.
+func (bc *MBarChart) Buffer() Buffer {
+ buf := bc.Block.Buffer()
+ bc.layout()
+ var oftX int
+
+ for i := 0; i < bc.numBar && i < bc.minDataLen && i < len(bc.DataLabels); i++ {
+ ph := 0 //Previous Height to stack up
+ oftX = i * (bc.BarWidth + bc.BarGap)
+ for i1 := 0; i1 < bc.numStack; i1++ {
+ h := int(float64(bc.Data[i1][i]) / bc.scale)
+ // plot bars
+ for j := 0; j < bc.BarWidth; j++ {
+ for k := 0; k < h; k++ {
+ c := Cell{
+ Ch: ' ',
+ Bg: bc.BarColor[i1],
+ }
+ if bc.BarColor[i1] == ColorDefault { // when color is default, space char treated as transparent!
+ c.Bg |= AttrReverse
+ }
+ x := bc.innerArea.Min.X + i*(bc.BarWidth+bc.BarGap) + j
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 - k - ph
+ buf.Set(x, y, c)
+
+ }
+ }
+ ph += h
+ }
+ // plot text
+ for j, k := 0, 0; j < len(bc.labels[i]); j++ {
+ w := charWidth(bc.labels[i][j])
+ c := Cell{
+ Ch: bc.labels[i][j],
+ Bg: bc.Bg,
+ Fg: bc.TextColor,
+ }
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 1
+ x := bc.innerArea.Min.X + oftX + ((bc.BarWidth - len(bc.labels[i])) / 2) + k
+ buf.Set(x, y, c)
+ k += w
+ }
+ // plot num
+ ph = 0 //re-initialize previous height
+ for i1 := 0; i1 < bc.numStack; i1++ {
+ h := int(float64(bc.Data[i1][i]) / bc.scale)
+ for j := 0; j < len(bc.dataNum[i1][i]) && h > 0; j++ {
+ c := Cell{
+ Ch: bc.dataNum[i1][i][j],
+ Fg: bc.NumColor[i1],
+ Bg: bc.BarColor[i1],
+ }
+ if bc.BarColor[i1] == ColorDefault { // the same as above
+ c.Bg |= AttrReverse
+ }
+ if h == 0 {
+ c.Bg = bc.Bg
+ }
+ x := bc.innerArea.Min.X + oftX + (bc.BarWidth-len(bc.dataNum[i1][i]))/2 + j
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 - ph
+ buf.Set(x, y, c)
+ }
+ ph += h
+ }
+ }
+
+ if bc.ShowScale {
+ // Currently the bar graph only supports data ranging from 0 to max
+ //Plot 0
+ c := Cell{
+ Ch: '0',
+ Bg: bc.Bg,
+ Fg: bc.TextColor,
+ }
+
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2
+ x := bc.X
+ buf.Set(x, y, c)
+
+ // Plot the maximum scale value
+ for i := 0; i < len(bc.maxScale); i++ {
+ c := Cell{
+ Ch: bc.maxScale[i],
+ Bg: bc.Bg,
+ Fg: bc.TextColor,
+ }
+
+ y := bc.innerArea.Min.Y
+ x := bc.X + i
+
+ buf.Set(x, y, c)
+ }
+
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/mkdocs.yml b/vendor/github.com/gizak/termui/mkdocs.yml
new file mode 100644
index 0000000..2ab45f0
--- /dev/null
+++ b/vendor/github.com/gizak/termui/mkdocs.yml
@@ -0,0 +1,28 @@
+pages:
+- Home: 'index.md'
+- Quickstart: 'quickstart.md'
+- Recipes: 'recipes.md'
+- References:
+ - Layouts: 'layouts.md'
+ - Components: 'components.md'
+ - Events: 'events.md'
+ - Themes: 'themes.md'
+- Versions: 'versions.md'
+- About: 'about.md'
+
+site_name: termui
+repo_url: https://github.com/gizak/termui/
+site_description: 'termui user guide'
+site_author: gizak
+
+docs_dir: '_docs'
+
+theme: readthedocs
+
+markdown_extensions:
+ - smarty
+ - admonition
+ - toc
+
+extra:
+ version: 1.0
diff --git a/vendor/github.com/gizak/termui/par.go b/vendor/github.com/gizak/termui/par.go
new file mode 100644
index 0000000..29b6d46
--- /dev/null
+++ b/vendor/github.com/gizak/termui/par.go
@@ -0,0 +1,73 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+// Par displays a paragraph.
+/*
+ par := termui.NewPar("Simple Text")
+ par.Height = 3
+ par.Width = 17
+ par.BorderLabel = "Label"
+*/
+type Par struct {
+ Block
+ Text string
+ TextFgColor Attribute
+ TextBgColor Attribute
+ WrapLength int // word wrap limit. Note it may not work properly with multi-width chars
+}
+
+// NewPar returns a new *Par with given text as its content.
+func NewPar(s string) *Par {
+ return &Par{
+ Block: *NewBlock(),
+ Text: s,
+ TextFgColor: ThemeAttr("par.text.fg"),
+ TextBgColor: ThemeAttr("par.text.bg"),
+ WrapLength: 0,
+ }
+}
+
+// Buffer implements Bufferer interface.
+func (p *Par) Buffer() Buffer {
+ buf := p.Block.Buffer()
+
+ fg, bg := p.TextFgColor, p.TextBgColor
+ cs := DefaultTxBuilder.Build(p.Text, fg, bg)
+
+ // wrap if WrapLength set
+ if p.WrapLength < 0 {
+ cs = wrapTx(cs, p.Width-2)
+ } else if p.WrapLength > 0 {
+ cs = wrapTx(cs, p.WrapLength)
+ }
+
+ y, x, n := 0, 0, 0
+ for y < p.innerArea.Dy() && n < len(cs) {
+ w := cs[n].Width()
+ if cs[n].Ch == '\n' || x+w > p.innerArea.Dx() {
+ y++
+ x = 0 // set x = 0
+ if cs[n].Ch == '\n' {
+ n++
+ }
+
+ if y >= p.innerArea.Dy() {
+ buf.Set(p.innerArea.Min.X+p.innerArea.Dx()-1,
+ p.innerArea.Min.Y+p.innerArea.Dy()-1,
+ Cell{Ch: '…', Fg: p.TextFgColor, Bg: p.TextBgColor})
+ break
+ }
+ continue
+ }
+
+ buf.Set(p.innerArea.Min.X+x, p.innerArea.Min.Y+y, cs[n])
+
+ n++
+ x += w
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/pos.go b/vendor/github.com/gizak/termui/pos.go
new file mode 100644
index 0000000..c7d647f
--- /dev/null
+++ b/vendor/github.com/gizak/termui/pos.go
@@ -0,0 +1,78 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import "image"
+
+// Align is the position of the gauge's label.
+type Align uint
+
+// All supported positions.
+const (
+ AlignNone Align = 0
+ AlignLeft Align = 1 << iota
+ AlignRight
+ AlignBottom
+ AlignTop
+ AlignCenterVertical
+ AlignCenterHorizontal
+ AlignCenter = AlignCenterVertical | AlignCenterHorizontal
+)
+
+func AlignArea(parent, child image.Rectangle, a Align) image.Rectangle {
+ w, h := child.Dx(), child.Dy()
+
+ // parent center
+ pcx, pcy := parent.Min.X+parent.Dx()/2, parent.Min.Y+parent.Dy()/2
+ // child center
+ ccx, ccy := child.Min.X+child.Dx()/2, child.Min.Y+child.Dy()/2
+
+ if a&AlignLeft == AlignLeft {
+ child.Min.X = parent.Min.X
+ child.Max.X = child.Min.X + w
+ }
+
+ if a&AlignRight == AlignRight {
+ child.Max.X = parent.Max.X
+ child.Min.X = child.Max.X - w
+ }
+
+ if a&AlignBottom == AlignBottom {
+ child.Max.Y = parent.Max.Y
+ child.Min.Y = child.Max.Y - h
+ }
+
+ if a&AlignTop == AlignTop {
+ child.Min.Y = parent.Min.Y
+ child.Max.Y = child.Min.Y + h
+ }
+
+ if a&AlignCenterHorizontal == AlignCenterHorizontal {
+ child.Min.X += pcx - ccx
+ child.Max.X = child.Min.X + w
+ }
+
+ if a&AlignCenterVertical == AlignCenterVertical {
+ child.Min.Y += pcy - ccy
+ child.Max.Y = child.Min.Y + h
+ }
+
+ return child
+}
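+
+// Usage sketch (illustration only): center a 20x5 child inside an 80x24 parent.
+/*
+ parent := image.Rect(0, 0, 80, 24)
+ child := image.Rect(0, 0, 20, 5)
+ centered := termui.AlignArea(parent, child, termui.AlignCenter)
+*/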
+
+func MoveArea(a image.Rectangle, dx, dy int) image.Rectangle {
+ a.Min.X += dx
+ a.Max.X += dx
+ a.Min.Y += dy
+ a.Max.Y += dy
+ return a
+}
+
+var termWidth int
+var termHeight int
+
+func TermRect() image.Rectangle {
+ return image.Rect(0, 0, termWidth, termHeight)
+}
diff --git a/vendor/github.com/gizak/termui/render.go b/vendor/github.com/gizak/termui/render.go
new file mode 100644
index 0000000..b9d37d9
--- /dev/null
+++ b/vendor/github.com/gizak/termui/render.go
@@ -0,0 +1,164 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "image"
+ "io"
+ "sync"
+ "time"
+
+ "fmt"
+
+ "os"
+
+ "runtime/debug"
+
+ "bytes"
+
+ "github.com/maruel/panicparse/stack"
+ tm "github.com/nsf/termbox-go"
+)
+
+// Bufferer should be implemented by all renderable components.
+type Bufferer interface {
+ Buffer() Buffer
+}
+
+// Init initializes the termui library. This function should be called before any others.
+// After initialization, the library must be finalized with the 'Close' function.
+func Init() error {
+ if err := tm.Init(); err != nil {
+ return err
+ }
+
+ sysEvtChs = make([]chan Event, 0)
+ go hookTermboxEvt()
+
+ renderJobs = make(chan []Bufferer)
+ //renderLock = new(sync.RWMutex)
+
+ Body = NewGrid()
+ Body.X = 0
+ Body.Y = 0
+ Body.BgColor = ThemeAttr("bg")
+ Body.Width = TermWidth()
+
+ DefaultEvtStream.Init()
+ DefaultEvtStream.Merge("termbox", NewSysEvtCh())
+ DefaultEvtStream.Merge("timer", NewTimerCh(time.Second))
+ DefaultEvtStream.Merge("custom", usrEvtCh)
+
+ DefaultEvtStream.Handle("/", DefaultHandler)
+ DefaultEvtStream.Handle("/sys/wnd/resize", func(e Event) {
+ w := e.Data.(EvtWnd)
+ Body.Width = w.Width
+ })
+
+ DefaultWgtMgr = NewWgtMgr()
+ DefaultEvtStream.Hook(DefaultWgtMgr.WgtHandlersHook())
+
+ go func() {
+ for bs := range renderJobs {
+ render(bs...)
+ }
+ }()
+
+ return nil
+}
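+
+// A minimal caller-side sketch (an assumption about typical usage, not part of this file):
+/*
+ if err := termui.Init(); err != nil {
+ 	panic(err)
+ }
+ defer termui.Close()
+
+ p := termui.NewPar("hello")
+ p.Width, p.Height = 20, 3
+ termui.Render(p)
+*/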
+
+// Close finalizes the termui library.
+// It should be called after successful initialization once termui's functionality is no longer required.
+func Close() {
+ tm.Close()
+}
+
+var renderLock sync.Mutex
+
+func termSync() {
+ renderLock.Lock()
+ tm.Sync()
+ termWidth, termHeight = tm.Size()
+ renderLock.Unlock()
+}
+
+// TermWidth returns the current terminal's width.
+func TermWidth() int {
+ termSync()
+ return termWidth
+}
+
+// TermHeight returns the current terminal's height.
+func TermHeight() int {
+ termSync()
+ return termHeight
+}
+
+// render renders all Bufferers in the given order from left to right;
+// later ones may overlap earlier ones.
+func render(bs ...Bufferer) {
+ defer func() {
+ if e := recover(); e != nil {
+ Close()
+ fmt.Fprintf(os.Stderr, "Captured a panic(value=%v) when rendering Bufferer. Exit termui and clean terminal...\nPrint stack trace:\n\n", e)
+ //debug.PrintStack()
+ gs, err := stack.ParseDump(bytes.NewReader(debug.Stack()), os.Stderr)
+ if err != nil {
+ debug.PrintStack()
+ os.Exit(1)
+ }
+ p := &stack.Palette{}
+ buckets := stack.SortBuckets(stack.Bucketize(gs, stack.AnyValue))
+ srcLen, pkgLen := stack.CalcLengths(buckets, false)
+ for _, bucket := range buckets {
+ io.WriteString(os.Stdout, p.BucketHeader(&bucket, false, len(buckets) > 1))
+ io.WriteString(os.Stdout, p.StackLines(&bucket.Signature, srcLen, pkgLen, false))
+ }
+ os.Exit(1)
+ }
+ }()
+ for _, b := range bs {
+
+ buf := b.Buffer()
+ // set cells in buf
+ for p, c := range buf.CellMap {
+ if p.In(buf.Area) {
+
+ tm.SetCell(p.X, p.Y, c.Ch, toTmAttr(c.Fg), toTmAttr(c.Bg))
+
+ }
+ }
+
+ }
+
+ renderLock.Lock()
+ // render
+ tm.Flush()
+ renderLock.Unlock()
+}
+
+func Clear() {
+ tm.Clear(tm.ColorDefault, toTmAttr(ThemeAttr("bg")))
+}
+
+func clearArea(r image.Rectangle, bg Attribute) {
+ for i := r.Min.X; i < r.Max.X; i++ {
+ for j := r.Min.Y; j < r.Max.Y; j++ {
+ tm.SetCell(i, j, ' ', tm.ColorDefault, toTmAttr(bg))
+ }
+ }
+}
+
+func ClearArea(r image.Rectangle, bg Attribute) {
+ clearArea(r, bg)
+ tm.Flush()
+}
+
+var renderJobs chan []Bufferer
+
+func Render(bs ...Bufferer) {
+ //go func() { renderJobs <- bs }()
+ renderJobs <- bs
+}
diff --git a/vendor/github.com/gizak/termui/sparkline.go b/vendor/github.com/gizak/termui/sparkline.go
new file mode 100644
index 0000000..d906e49
--- /dev/null
+++ b/vendor/github.com/gizak/termui/sparkline.go
@@ -0,0 +1,167 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+// Sparkline is like: ▅▆▂▂▅▇▂▂▃▆▆▆▅▃. The data points should be non-negative integers.
+/*
+ data := []int{4, 2, 1, 6, 3, 9, 1, 4, 2, 15, 14, 9, 8, 6, 10, 13, 15, 12, 10, 5, 3, 6, 1}
+ spl := termui.NewSparkline()
+ spl.Data = data
+ spl.Title = "Sparkline 0"
+ spl.LineColor = termui.ColorGreen
+*/
+type Sparkline struct {
+ Data []int
+ Height int
+ Title string
+ TitleColor Attribute
+ LineColor Attribute
+ displayHeight int
+ scale float32
+ max int
+}
+
+// Sparklines is a renderable widget which groups together the given sparklines.
+/*
+ spls := termui.NewSparklines(spl0,spl1,spl2) //...
+ spls.Height = 2
+ spls.Width = 20
+*/
+type Sparklines struct {
+ Block
+ Lines []Sparkline
+ displayLines int
+ displayWidth int
+}
+
+var sparks = []rune{'▁', '▂', '▃', '▄', '▅', '▆', '▇', '█'}
+
+// Add appends a given Sparkline to s *Sparklines.
+func (s *Sparklines) Add(sl Sparkline) {
+ s.Lines = append(s.Lines, sl)
+}
+
+// NewSparkline returns an unrenderable single sparkline intended to be added into a Sparklines.
+func NewSparkline() Sparkline {
+ return Sparkline{
+ Height: 1,
+ TitleColor: ThemeAttr("sparkline.title.fg"),
+ LineColor: ThemeAttr("sparkline.line.fg")}
+}
+
+// NewSparklines returns a new *Sparklines with the given Sparkline(s); you can always add a new Sparkline later.
+func NewSparklines(ss ...Sparkline) *Sparklines {
+ s := &Sparklines{Block: *NewBlock(), Lines: ss}
+ return s
+}
+
+func (sl *Sparklines) update() {
+ for i, v := range sl.Lines {
+ if v.Title == "" {
+ sl.Lines[i].displayHeight = v.Height
+ } else {
+ sl.Lines[i].displayHeight = v.Height + 1
+ }
+ }
+ sl.displayWidth = sl.innerArea.Dx()
+
+ // determine how many lines can be displayed
+ h := 0
+ sl.displayLines = 0
+ for _, v := range sl.Lines {
+ if h+v.displayHeight <= sl.innerArea.Dy() {
+ sl.displayLines++
+ } else {
+ break
+ }
+ h += v.displayHeight
+ }
+
+ for i := 0; i < sl.displayLines; i++ {
+ data := sl.Lines[i].Data
+
+ max := 0
+ for _, v := range data {
+ if max < v {
+ max = v
+ }
+ }
+ sl.Lines[i].max = max
+ if max != 0 {
+ sl.Lines[i].scale = float32(8*sl.Lines[i].Height) / float32(max)
+ } else { // when max is zero (all data non-positive)
+ sl.Lines[i].scale = 0
+ }
+ }
+}
+
+// Buffer implements Bufferer interface.
+func (sl *Sparklines) Buffer() Buffer {
+ buf := sl.Block.Buffer()
+ sl.update()
+
+ oftY := 0
+ for i := 0; i < sl.displayLines; i++ {
+ l := sl.Lines[i]
+ data := l.Data
+
+ if len(data) > sl.innerArea.Dx() {
+ data = data[len(data)-sl.innerArea.Dx():]
+ }
+
+ if l.Title != "" {
+ rs := trimStr2Runes(l.Title, sl.innerArea.Dx())
+ oftX := 0
+ for _, v := range rs {
+ w := charWidth(v)
+ c := Cell{
+ Ch: v,
+ Fg: l.TitleColor,
+ Bg: sl.Bg,
+ }
+ x := sl.innerArea.Min.X + oftX
+ y := sl.innerArea.Min.Y + oftY
+ buf.Set(x, y, c)
+ oftX += w
+ }
+ }
+
+ for j, v := range data {
+ // display height of the data point, zero when data is negative
+ h := int(float32(v)*l.scale + 0.5)
+ if v < 0 {
+ h = 0
+ }
+
+ barCnt := h / 8
+ barMod := h % 8
+ for jj := 0; jj < barCnt; jj++ {
+ c := Cell{
+ Ch: ' ', // => sparks[7]
+ Bg: l.LineColor,
+ }
+ x := sl.innerArea.Min.X + j
+ y := sl.innerArea.Min.Y + oftY + l.Height - jj
+
+ //p.Bg = sl.BgColor
+ buf.Set(x, y, c)
+ }
+ if barMod != 0 {
+ c := Cell{
+ Ch: sparks[barMod-1],
+ Fg: l.LineColor,
+ Bg: sl.Bg,
+ }
+ x := sl.innerArea.Min.X + j
+ y := sl.innerArea.Min.Y + oftY + l.Height - barCnt
+ buf.Set(x, y, c)
+ }
+ }
+
+ oftY += l.displayHeight
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/table.go b/vendor/github.com/gizak/termui/table.go
new file mode 100644
index 0000000..e3d1bbf
--- /dev/null
+++ b/vendor/github.com/gizak/termui/table.go
@@ -0,0 +1,185 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import "strings"
+
+/* Table is like:
+
+┌Awesome Table ────────────────────────────────────────────────┐
+│ Col0 | Col1 | Col2 | Col3 | Col4 | Col5 | Col6 |
+│──────────────────────────────────────────────────────────────│
+│ Some Item #1 | AAA | 123 | CCCCC | EEEEE | GGGGG | IIIII |
+│──────────────────────────────────────────────────────────────│
+│ Some Item #2 | BBB | 456 | DDDDD | FFFFF | HHHHH | JJJJJ |
+└──────────────────────────────────────────────────────────────┘
+
+Datapoints are a two dimensional array of strings: [][]string
+
+Example:
+ data := [][]string{
+ {"Col0", "Col1", "Col3", "Col4", "Col5", "Col6"},
+ {"Some Item #1", "AAA", "123", "CCCCC", "EEEEE", "GGGGG", "IIIII"},
+ {"Some Item #2", "BBB", "456", "DDDDD", "FFFFF", "HHHHH", "JJJJJ"},
+ }
+
+ table := termui.NewTable()
+ table.Rows = data // type [][]string
+ table.FgColor = termui.ColorWhite
+ table.BgColor = termui.ColorDefault
+ table.Height = 7
+ table.Width = 62
+ table.Y = 0
+ table.X = 0
+ table.Border = true
+*/
+
+// Table tracks all the attributes of a Table instance
+type Table struct {
+ Block
+ Rows [][]string
+ CellWidth []int
+ FgColor Attribute
+ BgColor Attribute
+ FgColors []Attribute
+ BgColors []Attribute
+ Separator bool
+ TextAlign Align
+}
+
+// NewTable returns a new Table instance
+func NewTable() *Table {
+ table := &Table{Block: *NewBlock()}
+ table.FgColor = ColorWhite
+ table.BgColor = ColorDefault
+ table.Separator = true
+ return table
+}
+
+// cellsWidth calculates the total width of a cell slice.
+func cellsWidth(cells []Cell) int {
+ width := 0
+ for _, c := range cells {
+ width += c.Width()
+ }
+ return width
+}
+
+// Analysis generates and returns a slice of []Cell representing every cell in the Table, and records the column widths.
+func (table *Table) Analysis() [][]Cell {
+ var rowCells [][]Cell
+ length := len(table.Rows)
+ if length < 1 {
+ return rowCells
+ }
+
+ if len(table.FgColors) == 0 {
+ table.FgColors = make([]Attribute, len(table.Rows))
+ }
+ if len(table.BgColors) == 0 {
+ table.BgColors = make([]Attribute, len(table.Rows))
+ }
+
+ cellWidths := make([]int, len(table.Rows[0]))
+
+ for y, row := range table.Rows {
+ if table.FgColors[y] == 0 {
+ table.FgColors[y] = table.FgColor
+ }
+ if table.BgColors[y] == 0 {
+ table.BgColors[y] = table.BgColor
+ }
+ for x, str := range row {
+ cells := DefaultTxBuilder.Build(str, table.FgColors[y], table.BgColors[y])
+ cw := cellsWidth(cells)
+ if cellWidths[x] < cw {
+ cellWidths[x] = cw
+ }
+ rowCells = append(rowCells, cells)
+ }
+ }
+ table.CellWidth = cellWidths
+ return rowCells
+}
+
+// SetSize calculates the table size and sets the internal value
+func (table *Table) SetSize() {
+ length := len(table.Rows)
+ if table.Separator {
+ table.Height = length*2 + 1
+ } else {
+ table.Height = length + 2
+ }
+ table.Width = 2
+ if length != 0 {
+ for _, cellWidth := range table.CellWidth {
+ table.Width += cellWidth + 3
+ }
+ }
+}
+
+// CalculatePosition computes the drawing coordinates of the cell at column x, row y.
+func (table *Table) CalculatePosition(x int, y int, coordinateX *int, coordinateY *int, cellStart *int) {
+ if table.Separator {
+ *coordinateY = table.innerArea.Min.Y + y*2
+ } else {
+ *coordinateY = table.innerArea.Min.Y + y
+ }
+ if x == 0 {
+ *cellStart = table.innerArea.Min.X
+ } else {
+ *cellStart += table.CellWidth[x-1] + 3
+ }
+
+ switch table.TextAlign {
+ case AlignRight:
+ *coordinateX = *cellStart + (table.CellWidth[x] - len(table.Rows[y][x])) + 2
+ case AlignCenter:
+ *coordinateX = *cellStart + (table.CellWidth[x]-len(table.Rows[y][x]))/2 + 2
+ default:
+ *coordinateX = *cellStart + 2
+ }
+}
+
+// Buffer implements Bufferer interface.
+func (table *Table) Buffer() Buffer {
+ buffer := table.Block.Buffer()
+ rowCells := table.Analysis()
+ pointerX := table.innerArea.Min.X + 2
+ pointerY := table.innerArea.Min.Y
+ borderPointerX := table.innerArea.Min.X
+ for y, row := range table.Rows {
+ for x := range row {
+ table.CalculatePosition(x, y, &pointerX, &pointerY, &borderPointerX)
+ background := DefaultTxBuilder.Build(strings.Repeat(" ", table.CellWidth[x]+3), table.BgColors[y], table.BgColors[y])
+ cells := rowCells[y*len(row)+x]
+ for i, back := range background {
+ buffer.Set(borderPointerX+i, pointerY, back)
+ }
+
+ coordinateX := pointerX
+ for _, printer := range cells {
+ buffer.Set(coordinateX, pointerY, printer)
+ coordinateX += printer.Width()
+ }
+
+ if x != 0 {
+ dividors := DefaultTxBuilder.Build("|", table.FgColors[y], table.BgColors[y])
+ for _, dividor := range dividors {
+ buffer.Set(borderPointerX, pointerY, dividor)
+ }
+ }
+ }
+
+ if table.Separator {
+ border := DefaultTxBuilder.Build(strings.Repeat("─", table.Width-2), table.FgColor, table.BgColor)
+ for i, cell := range border {
+ buffer.Set(i+1, pointerY+1, cell)
+ }
+ }
+ }
+
+ return buffer
+}
diff --git a/vendor/github.com/gizak/termui/textbuilder.go b/vendor/github.com/gizak/termui/textbuilder.go
new file mode 100644
index 0000000..12e2055
--- /dev/null
+++ b/vendor/github.com/gizak/termui/textbuilder.go
@@ -0,0 +1,278 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/mitchellh/go-wordwrap"
+)
+
+// TextBuilder is a minimal interface to produce text []Cell using specific syntax (markdown).
+type TextBuilder interface {
+ Build(s string, fg, bg Attribute) []Cell
+}
+
+// DefaultTxBuilder is set to be MarkdownTxBuilder.
+var DefaultTxBuilder = NewMarkdownTxBuilder()
+
+// MarkdownTxBuilder implements TextBuilder interface, using markdown syntax.
+type MarkdownTxBuilder struct {
+ baseFg Attribute
+ baseBg Attribute
+ plainTx []rune
+ markers []marker
+}
+
+type marker struct {
+ st int
+ ed int
+ fg Attribute
+ bg Attribute
+}
+
+var colorMap = map[string]Attribute{
+ "red": ColorRed,
+ "blue": ColorBlue,
+ "black": ColorBlack,
+ "cyan": ColorCyan,
+ "yellow": ColorYellow,
+ "white": ColorWhite,
+ "default": ColorDefault,
+ "green": ColorGreen,
+ "magenta": ColorMagenta,
+}
+
+var attrMap = map[string]Attribute{
+ "bold": AttrBold,
+ "underline": AttrUnderline,
+ "reverse": AttrReverse,
+}
+
+func rmSpc(s string) string {
+ reg := regexp.MustCompile(`\s+`)
+ return reg.ReplaceAllString(s, "")
+}
+
+// readAttr translates strings like `fg-red,fg-bold,bg-white` to fg and bg Attribute
+func (mtb MarkdownTxBuilder) readAttr(s string) (Attribute, Attribute) {
+ fg := mtb.baseFg
+ bg := mtb.baseBg
+
+ updateAttr := func(a Attribute, attrs []string) Attribute {
+ for _, s := range attrs {
+ // replace the color
+ if c, ok := colorMap[s]; ok {
+ a &= 0xFF00 // erase clr 0 ~ 8 bits
+ a |= c // set clr
+ }
+ // add attrs
+ if c, ok := attrMap[s]; ok {
+ a |= c
+ }
+ }
+ return a
+ }
+
+ ss := strings.Split(s, ",")
+ fgs := []string{}
+ bgs := []string{}
+ for _, v := range ss {
+ subs := strings.Split(v, "-")
+ if len(subs) > 1 {
+ if subs[0] == "fg" {
+ fgs = append(fgs, subs[1])
+ }
+ if subs[0] == "bg" {
+ bgs = append(bgs, subs[1])
+ }
+ }
+ }
+
+ fg = updateAttr(fg, fgs)
+ bg = updateAttr(bg, bgs)
+ return fg, bg
+}
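+
+// Illustration (not part of the original source), assuming plain base attributes:
+/*
+ fg, bg := mtb.readAttr("fg-red,fg-bold,bg-white")
+ // fg == ColorRed|AttrBold, bg == ColorWhite
+*/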
+
+func (mtb *MarkdownTxBuilder) reset() {
+ mtb.plainTx = []rune{}
+ mtb.markers = []marker{}
+}
+
+// parse scans the input and splits it into normalized text and a render sequence.
+func (mtb *MarkdownTxBuilder) parse(str string) {
+ rs := str2runes(str)
+ normTx := []rune{}
+ square := []rune{}
+ brackt := []rune{}
+ accSquare := false
+ accBrackt := false
+ cntSquare := 0
+
+ reset := func() {
+ square = []rune{}
+ brackt = []rune{}
+ accSquare = false
+ accBrackt = false
+ cntSquare = 0
+ }
+ // pipe stacks into normTx and clear
+ rollback := func() {
+ normTx = append(normTx, square...)
+ normTx = append(normTx, brackt...)
+ reset()
+ }
+ // chop first and last
+ chop := func(s []rune) []rune {
+ return s[1 : len(s)-1]
+ }
+
+ for i, r := range rs {
+ switch {
+ // stacking brackt
+ case accBrackt:
+ brackt = append(brackt, r)
+ if ')' == r {
+ fg, bg := mtb.readAttr(string(chop(brackt)))
+ st := len(normTx)
+ ed := len(normTx) + len(square) - 2
+ mtb.markers = append(mtb.markers, marker{st, ed, fg, bg})
+ normTx = append(normTx, chop(square)...)
+ reset()
+ } else if i+1 == len(rs) {
+ rollback()
+ }
+ // stacking square
+ case accSquare:
+ switch {
+ // squares closed and followed by a '('
+ case cntSquare == 0 && '(' == r:
+ accBrackt = true
+ brackt = append(brackt, '(')
+ // squares closed but not followed by a '('
+ case cntSquare == 0:
+ rollback()
+ if '[' == r {
+ accSquare = true
+ cntSquare = 1
+ brackt = append(brackt, '[')
+ } else {
+ normTx = append(normTx, r)
+ }
+ // hit the end
+ case i+1 == len(rs):
+ square = append(square, r)
+ rollback()
+ case '[' == r:
+ cntSquare++
+ square = append(square, '[')
+ case ']' == r:
+ cntSquare--
+ square = append(square, ']')
+ // normal char
+ default:
+ square = append(square, r)
+ }
+ // stacking normTx
+ default:
+ if '[' == r {
+ accSquare = true
+ cntSquare = 1
+ square = append(square, '[')
+ } else {
+ normTx = append(normTx, r)
+ }
+ }
+ }
+
+ mtb.plainTx = normTx
+}
+
+func wrapTx(cs []Cell, wl int) []Cell {
+ tmpCell := make([]Cell, len(cs))
+ copy(tmpCell, cs)
+
+ // get the plaintext
+ plain := CellsToStr(cs)
+
+ // wrap
+ plainWrapped := wordwrap.WrapString(plain, uint(wl))
+
+ // find differences and insert
+ finalCell := tmpCell // finalcell will get the inserts and is what is returned
+
+ plainRune := []rune(plain)
+ plainWrappedRune := []rune(plainWrapped)
+ trigger := "go"
+ plainRuneNew := plainRune
+
+ for trigger != "stop" {
+ plainRune = plainRuneNew
+ for i := range plainRune {
+ if plainRune[i] == plainWrappedRune[i] {
+ trigger = "stop"
+ } else if plainRune[i] != plainWrappedRune[i] && plainWrappedRune[i] == 10 {
+ trigger = "go"
+ cell := Cell{10, 0, 0}
+ j := i
+
+ // place a newline cell at this position in the []Cell
+ tmpCell[i] = cell
+
+ // insert the newline into plain so we avoid indexing errors
+ plainRuneNew = append(plainRune, 10)
+ copy(plainRuneNew[j+1:], plainRuneNew[j:])
+ plainRuneNew[j] = plainWrappedRune[j]
+
+ // restart the inner for loop until plain and plain wrapped are
+ // the same; yeah, it's inefficient, but the text amounts
+ // should be small
+ break
+
+ } else if plainRune[i] != plainWrappedRune[i] &&
+ plainWrappedRune[i-1] == 10 && // if the prior rune is a newline
+ plainRune[i] == 32 { // and this rune is a space
+ trigger = "go"
+ // need to delete plainRune[i] because it gets rid of an extra
+ // space
+ plainRuneNew = append(plainRune[:i], plainRune[i+1:]...)
+ break
+
+ } else {
+ trigger = "stop" // stops the outer for loop
+ }
+ }
+ }
+
+ finalCell = tmpCell
+
+ return finalCell
+}
+
+// Build implements TextBuilder interface.
+func (mtb MarkdownTxBuilder) Build(s string, fg, bg Attribute) []Cell {
+ mtb.baseFg = fg
+ mtb.baseBg = bg
+ mtb.reset()
+ mtb.parse(s)
+ cs := make([]Cell, len(mtb.plainTx))
+ for i := range cs {
+ cs[i] = Cell{Ch: mtb.plainTx[i], Fg: fg, Bg: bg}
+ }
+ for _, mrk := range mtb.markers {
+ for i := mrk.st; i < mrk.ed; i++ {
+ cs[i].Fg = mrk.fg
+ cs[i].Bg = mrk.bg
+ }
+ }
+
+ return cs
+}
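+
+// Usage sketch (illustration only): bracketed text followed by a parenthesised
+// attribute list is coloured; everything else keeps the base attributes.
+/*
+ cells := termui.DefaultTxBuilder.Build("plain [warning](fg-red,fg-bold) plain",
+ 	termui.ColorWhite, termui.ColorDefault)
+*/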
+
+// NewMarkdownTxBuilder returns a TextBuilder employing markdown syntax.
+func NewMarkdownTxBuilder() TextBuilder {
+ return MarkdownTxBuilder{}
+}
diff --git a/vendor/github.com/gizak/termui/theme.go b/vendor/github.com/gizak/termui/theme.go
new file mode 100644
index 0000000..21fb3bf
--- /dev/null
+++ b/vendor/github.com/gizak/termui/theme.go
@@ -0,0 +1,140 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import "strings"
+
+/*
+// A ColorScheme represents the current look-and-feel of the dashboard.
+type ColorScheme struct {
+ BodyBg Attribute
+ BlockBg Attribute
+ HasBorder bool
+ BorderFg Attribute
+ BorderBg Attribute
+ BorderLabelTextFg Attribute
+ BorderLabelTextBg Attribute
+ ParTextFg Attribute
+ ParTextBg Attribute
+ SparklineLine Attribute
+ SparklineTitle Attribute
+ GaugeBar Attribute
+ GaugePercent Attribute
+ LineChartLine Attribute
+ LineChartAxes Attribute
+ ListItemFg Attribute
+ ListItemBg Attribute
+ BarChartBar Attribute
+ BarChartText Attribute
+ BarChartNum Attribute
+ MBarChartBar Attribute
+ MBarChartText Attribute
+ MBarChartNum Attribute
+ TabActiveBg Attribute
+}
+
+// default color scheme depends on the user's terminal setting.
+var themeDefault = ColorScheme{HasBorder: true}
+
+var themeHelloWorld = ColorScheme{
+ BodyBg: ColorBlack,
+ BlockBg: ColorBlack,
+ HasBorder: true,
+ BorderFg: ColorWhite,
+ BorderBg: ColorBlack,
+ BorderLabelTextBg: ColorBlack,
+ BorderLabelTextFg: ColorGreen,
+ ParTextBg: ColorBlack,
+ ParTextFg: ColorWhite,
+ SparklineLine: ColorMagenta,
+ SparklineTitle: ColorWhite,
+ GaugeBar: ColorRed,
+ GaugePercent: ColorWhite,
+ LineChartLine: ColorYellow | AttrBold,
+ LineChartAxes: ColorWhite,
+ ListItemBg: ColorBlack,
+ ListItemFg: ColorYellow,
+ BarChartBar: ColorRed,
+ BarChartNum: ColorWhite,
+ BarChartText: ColorCyan,
+ MBarChartBar: ColorRed,
+ MBarChartNum: ColorWhite,
+ MBarChartText: ColorCyan,
+ TabActiveBg: ColorMagenta,
+}
+
+var theme = themeDefault // global dep
+
+// Theme returns the currently used theme.
+func Theme() ColorScheme {
+ return theme
+}
+
+// SetTheme sets a new, custom theme.
+func SetTheme(newTheme ColorScheme) {
+ theme = newTheme
+}
+
+// UseTheme sets a predefined scheme. Currently available: "hello-world" and
+// "black-and-white".
+func UseTheme(th string) {
+ switch th {
+ case "helloworld":
+ theme = themeHelloWorld
+ default:
+ theme = themeDefault
+ }
+}
+*/
+
+var ColorMap = map[string]Attribute{
+ "fg": ColorWhite,
+ "bg": ColorDefault,
+ "border.fg": ColorWhite,
+ "label.fg": ColorGreen,
+ "par.fg": ColorYellow,
+ "par.label.bg": ColorWhite,
+}
+
+func ThemeAttr(name string) Attribute {
+ return lookUpAttr(ColorMap, name)
+}
+
+func lookUpAttr(clrmap map[string]Attribute, name string) Attribute {
+
+ a, ok := clrmap[name]
+ if ok {
+ return a
+ }
+
+ ns := strings.Split(name, ".")
+ for i := range ns {
+ nn := strings.Join(ns[i:len(ns)], ".")
+ a, ok = ColorMap[nn]
+ if ok {
+ break
+ }
+ }
+
+ return a
+}
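+
+// Illustration (not part of the original source): lookups fall back to ever
+// shorter suffixes of the dotted name.
+/*
+ ThemeAttr("block.border.fg") // tries "block.border.fg", then "border.fg" -> ColorWhite
+ ThemeAttr("par.text.fg")     // tries "par.text.fg", "text.fg", then "fg" -> ColorWhite
+*/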
+
+// ColorRGB returns an Attribute for the given r, g, b values, each clamped to the range 0..5.
+func ColorRGB(r, g, b int) Attribute {
+ within := func(n int) int {
+ if n < 0 {
+ return 0
+ }
+
+ if n > 5 {
+ return 5
+ }
+
+ return n
+ }
+
+ r, b, g = within(r), within(b), within(g)
+ return Attribute(0x0f + 36*r + 6*g + b)
+}
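+
+// Usage sketch (illustration only): each channel is clamped to 0..5 before the
+// attribute is computed.
+/*
+ orange := termui.ColorRGB(5, 2, 0)
+ clamped := termui.ColorRGB(9, -1, 3) // same as ColorRGB(5, 0, 3)
+*/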
diff --git a/vendor/github.com/gizak/termui/widget.go b/vendor/github.com/gizak/termui/widget.go
new file mode 100644
index 0000000..80276bf
--- /dev/null
+++ b/vendor/github.com/gizak/termui/widget.go
@@ -0,0 +1,94 @@
+// Copyright 2017 Zack Guo . All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "fmt"
+ "sync"
+)
+
+// event mixins
+type WgtMgr map[string]WgtInfo
+
+type WgtInfo struct {
+ Handlers map[string]func(Event)
+ WgtRef Widget
+ Id string
+}
+
+type Widget interface {
+ Id() string
+}
+
+func NewWgtInfo(wgt Widget) WgtInfo {
+ return WgtInfo{
+ Handlers: make(map[string]func(Event)),
+ WgtRef: wgt,
+ Id: wgt.Id(),
+ }
+}
+
+func NewWgtMgr() WgtMgr {
+ wm := WgtMgr(make(map[string]WgtInfo))
+ return wm
+
+}
+
+func (wm WgtMgr) AddWgt(wgt Widget) {
+ wm[wgt.Id()] = NewWgtInfo(wgt)
+}
+
+func (wm WgtMgr) RmWgt(wgt Widget) {
+ wm.RmWgtById(wgt.Id())
+}
+
+func (wm WgtMgr) RmWgtById(id string) {
+ delete(wm, id)
+}
+
+func (wm WgtMgr) AddWgtHandler(id, path string, h func(Event)) {
+ if w, ok := wm[id]; ok {
+ w.Handlers[path] = h
+ }
+}
+
+func (wm WgtMgr) RmWgtHandler(id, path string) {
+ if w, ok := wm[id]; ok {
+ delete(w.Handlers, path)
+ }
+}
+
+var counter struct {
+ sync.RWMutex
+ count int
+}
+
+func GenId() string {
+ counter.Lock()
+ defer counter.Unlock()
+
+ counter.count += 1
+ return fmt.Sprintf("%d", counter.count)
+}
+
+func (wm WgtMgr) WgtHandlersHook() func(Event) {
+ return func(e Event) {
+ for _, v := range wm {
+ if k := findMatch(v.Handlers, e.Path); k != "" {
+ v.Handlers[k](e)
+ }
+ }
+ }
+}
+
+var DefaultWgtMgr WgtMgr
+
+func (b *Block) Handle(path string, handler func(Event)) {
+ if _, ok := DefaultWgtMgr[b.Id()]; !ok {
+ DefaultWgtMgr.AddWgt(b)
+ }
+
+ DefaultWgtMgr.AddWgtHandler(b.Id(), path, handler)
+}
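
`Block.Handle` lazily registers the widget with `DefaultWgtMgr` and then attaches a handler under an event path, which `WgtHandlersHook` dispatches when an event's path matches. A hedged usage sketch (not part of the vendored file), assuming the usual `termui.Init`/`Loop` wiring installs `DefaultWgtMgr` and its hook:

```go
package main

import "github.com/gizak/termui"

func main() {
	if err := termui.Init(); err != nil {
		panic(err)
	}
	defer termui.Close()

	p := termui.NewPar("press q to quit")
	p.Width, p.Height = 30, 3

	// Handle registers p with DefaultWgtMgr on first use, then attaches the
	// handler under the keyboard event path for the letter q.
	p.Handle("/sys/kbd/q", func(termui.Event) {
		termui.StopLoop()
	})

	termui.Render(p)
	termui.Loop()
}
```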
diff --git a/vendor/github.com/google/go-github/LICENSE b/vendor/github.com/google/go-github/LICENSE
new file mode 100644
index 0000000..5582e4a
--- /dev/null
+++ b/vendor/github.com/google/go-github/LICENSE
@@ -0,0 +1,341 @@
+Copyright (c) 2013 The go-github AUTHORS. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------
+
+Some documentation is taken from the GitHub Developer site,
+which is available under the following Creative
+Commons Attribution 3.0 License. This applies only to the go-github source
+code and would not apply to any compiled binaries.
+
+THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
+COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY
+COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS
+AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+
+BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE
+TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY
+BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS
+CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND
+CONDITIONS.
+
+1. Definitions
+
+ a. "Adaptation" means a work based upon the Work, or upon the Work and
+ other pre-existing works, such as a translation, adaptation,
+ derivative work, arrangement of music or other alterations of a
+ literary or artistic work, or phonogram or performance and includes
+ cinematographic adaptations or any other form in which the Work may be
+ recast, transformed, or adapted including in any form recognizably
+ derived from the original, except that a work that constitutes a
+ Collection will not be considered an Adaptation for the purpose of
+ this License. For the avoidance of doubt, where the Work is a musical
+ work, performance or phonogram, the synchronization of the Work in
+ timed-relation with a moving image ("synching") will be considered an
+ Adaptation for the purpose of this License.
+ b. "Collection" means a collection of literary or artistic works, such as
+ encyclopedias and anthologies, or performances, phonograms or
+ broadcasts, or other works or subject matter other than works listed
+ in Section 1(f) below, which, by reason of the selection and
+ arrangement of their contents, constitute intellectual creations, in
+ which the Work is included in its entirety in unmodified form along
+ with one or more other contributions, each constituting separate and
+ independent works in themselves, which together are assembled into a
+ collective whole. A work that constitutes a Collection will not be
+ considered an Adaptation (as defined above) for the purposes of this
+ License.
+ c. "Distribute" means to make available to the public the original and
+ copies of the Work or Adaptation, as appropriate, through sale or
+ other transfer of ownership.
+ d. "Licensor" means the individual, individuals, entity or entities that
+ offer(s) the Work under the terms of this License.
+ e. "Original Author" means, in the case of a literary or artistic work,
+ the individual, individuals, entity or entities who created the Work
+ or if no individual or entity can be identified, the publisher; and in
+ addition (i) in the case of a performance the actors, singers,
+ musicians, dancers, and other persons who act, sing, deliver, declaim,
+ play in, interpret or otherwise perform literary or artistic works or
+ expressions of folklore; (ii) in the case of a phonogram the producer
+ being the person or legal entity who first fixes the sounds of a
+ performance or other sounds; and, (iii) in the case of broadcasts, the
+ organization that transmits the broadcast.
+ f. "Work" means the literary and/or artistic work offered under the terms
+ of this License including without limitation any production in the
+ literary, scientific and artistic domain, whatever may be the mode or
+ form of its expression including digital form, such as a book,
+ pamphlet and other writing; a lecture, address, sermon or other work
+ of the same nature; a dramatic or dramatico-musical work; a
+ choreographic work or entertainment in dumb show; a musical
+ composition with or without words; a cinematographic work to which are
+ assimilated works expressed by a process analogous to cinematography;
+ a work of drawing, painting, architecture, sculpture, engraving or
+ lithography; a photographic work to which are assimilated works
+ expressed by a process analogous to photography; a work of applied
+ art; an illustration, map, plan, sketch or three-dimensional work
+ relative to geography, topography, architecture or science; a
+ performance; a broadcast; a phonogram; a compilation of data to the
+ extent it is protected as a copyrightable work; or a work performed by
+ a variety or circus performer to the extent it is not otherwise
+ considered a literary or artistic work.
+ g. "You" means an individual or entity exercising rights under this
+ License who has not previously violated the terms of this License with
+ respect to the Work, or who has received express permission from the
+ Licensor to exercise rights under this License despite a previous
+ violation.
+ h. "Publicly Perform" means to perform public recitations of the Work and
+ to communicate to the public those public recitations, by any means or
+ process, including by wire or wireless means or public digital
+ performances; to make available to the public Works in such a way that
+ members of the public may access these Works from a place and at a
+ place individually chosen by them; to perform the Work to the public
+ by any means or process and the communication to the public of the
+ performances of the Work, including by public digital performance; to
+ broadcast and rebroadcast the Work by any means including signs,
+ sounds or images.
+ i. "Reproduce" means to make copies of the Work by any means including
+ without limitation by sound or visual recordings and the right of
+ fixation and reproducing fixations of the Work, including storage of a
+ protected performance or phonogram in digital form or other electronic
+ medium.
+
+2. Fair Dealing Rights. Nothing in this License is intended to reduce,
+limit, or restrict any uses free from copyright or rights arising from
+limitations or exceptions that are provided for in connection with the
+copyright protection under copyright law or other applicable laws.
+
+3. License Grant. Subject to the terms and conditions of this License,
+Licensor hereby grants You a worldwide, royalty-free, non-exclusive,
+perpetual (for the duration of the applicable copyright) license to
+exercise the rights in the Work as stated below:
+
+ a. to Reproduce the Work, to incorporate the Work into one or more
+ Collections, and to Reproduce the Work as incorporated in the
+ Collections;
+ b. to create and Reproduce Adaptations provided that any such Adaptation,
+ including any translation in any medium, takes reasonable steps to
+ clearly label, demarcate or otherwise identify that changes were made
+ to the original Work. For example, a translation could be marked "The
+ original work was translated from English to Spanish," or a
+ modification could indicate "The original work has been modified.";
+ c. to Distribute and Publicly Perform the Work including as incorporated
+ in Collections; and,
+ d. to Distribute and Publicly Perform Adaptations.
+ e. For the avoidance of doubt:
+
+ i. Non-waivable Compulsory License Schemes. In those jurisdictions in
+ which the right to collect royalties through any statutory or
+ compulsory licensing scheme cannot be waived, the Licensor
+ reserves the exclusive right to collect such royalties for any
+ exercise by You of the rights granted under this License;
+ ii. Waivable Compulsory License Schemes. In those jurisdictions in
+ which the right to collect royalties through any statutory or
+ compulsory licensing scheme can be waived, the Licensor waives the
+ exclusive right to collect such royalties for any exercise by You
+ of the rights granted under this License; and,
+ iii. Voluntary License Schemes. The Licensor waives the right to
+ collect royalties, whether individually or, in the event that the
+ Licensor is a member of a collecting society that administers
+ voluntary licensing schemes, via that society, from any exercise
+ by You of the rights granted under this License.
+
+The above rights may be exercised in all media and formats whether now
+known or hereafter devised. The above rights include the right to make
+such modifications as are technically necessary to exercise the rights in
+other media and formats. Subject to Section 8(f), all rights not expressly
+granted by Licensor are hereby reserved.
+
+4. Restrictions. The license granted in Section 3 above is expressly made
+subject to and limited by the following restrictions:
+
+ a. You may Distribute or Publicly Perform the Work only under the terms
+ of this License. You must include a copy of, or the Uniform Resource
+ Identifier (URI) for, this License with every copy of the Work You
+ Distribute or Publicly Perform. You may not offer or impose any terms
+ on the Work that restrict the terms of this License or the ability of
+ the recipient of the Work to exercise the rights granted to that
+ recipient under the terms of the License. You may not sublicense the
+ Work. You must keep intact all notices that refer to this License and
+ to the disclaimer of warranties with every copy of the Work You
+ Distribute or Publicly Perform. When You Distribute or Publicly
+ Perform the Work, You may not impose any effective technological
+ measures on the Work that restrict the ability of a recipient of the
+ Work from You to exercise the rights granted to that recipient under
+ the terms of the License. This Section 4(a) applies to the Work as
+ incorporated in a Collection, but this does not require the Collection
+ apart from the Work itself to be made subject to the terms of this
+ License. If You create a Collection, upon notice from any Licensor You
+ must, to the extent practicable, remove from the Collection any credit
+ as required by Section 4(b), as requested. If You create an
+ Adaptation, upon notice from any Licensor You must, to the extent
+ practicable, remove from the Adaptation any credit as required by
+ Section 4(b), as requested.
+ b. If You Distribute, or Publicly Perform the Work or any Adaptations or
+ Collections, You must, unless a request has been made pursuant to
+ Section 4(a), keep intact all copyright notices for the Work and
+ provide, reasonable to the medium or means You are utilizing: (i) the
+ name of the Original Author (or pseudonym, if applicable) if supplied,
+ and/or if the Original Author and/or Licensor designate another party
+ or parties (e.g., a sponsor institute, publishing entity, journal) for
+ attribution ("Attribution Parties") in Licensor's copyright notice,
+ terms of service or by other reasonable means, the name of such party
+ or parties; (ii) the title of the Work if supplied; (iii) to the
+ extent reasonably practicable, the URI, if any, that Licensor
+ specifies to be associated with the Work, unless such URI does not
+ refer to the copyright notice or licensing information for the Work;
+ and (iv) , consistent with Section 3(b), in the case of an Adaptation,
+ a credit identifying the use of the Work in the Adaptation (e.g.,
+ "French translation of the Work by Original Author," or "Screenplay
+ based on original Work by Original Author"). The credit required by
+ this Section 4 (b) may be implemented in any reasonable manner;
+ provided, however, that in the case of a Adaptation or Collection, at
+ a minimum such credit will appear, if a credit for all contributing
+ authors of the Adaptation or Collection appears, then as part of these
+ credits and in a manner at least as prominent as the credits for the
+ other contributing authors. For the avoidance of doubt, You may only
+ use the credit required by this Section for the purpose of attribution
+ in the manner set out above and, by exercising Your rights under this
+ License, You may not implicitly or explicitly assert or imply any
+ connection with, sponsorship or endorsement by the Original Author,
+ Licensor and/or Attribution Parties, as appropriate, of You or Your
+ use of the Work, without the separate, express prior written
+ permission of the Original Author, Licensor and/or Attribution
+ Parties.
+ c. Except as otherwise agreed in writing by the Licensor or as may be
+ otherwise permitted by applicable law, if You Reproduce, Distribute or
+ Publicly Perform the Work either by itself or as part of any
+ Adaptations or Collections, You must not distort, mutilate, modify or
+ take other derogatory action in relation to the Work which would be
+ prejudicial to the Original Author's honor or reputation. Licensor
+ agrees that in those jurisdictions (e.g. Japan), in which any exercise
+ of the right granted in Section 3(b) of this License (the right to
+ make Adaptations) would be deemed to be a distortion, mutilation,
+ modification or other derogatory action prejudicial to the Original
+ Author's honor and reputation, the Licensor will waive or not assert,
+ as appropriate, this Section, to the fullest extent permitted by the
+ applicable national law, to enable You to reasonably exercise Your
+ right under Section 3(b) of this License (right to make Adaptations)
+ but not otherwise.
+
+5. Representations, Warranties and Disclaimer
+
+UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR
+OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY
+KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE,
+INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY,
+FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF
+LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS,
+WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION
+OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
+
+6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE
+LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR
+ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES
+ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS
+BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+7. Termination
+
+ a. This License and the rights granted hereunder will terminate
+ automatically upon any breach by You of the terms of this License.
+ Individuals or entities who have received Adaptations or Collections
+ from You under this License, however, will not have their licenses
+ terminated provided such individuals or entities remain in full
+ compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will
+ survive any termination of this License.
+ b. Subject to the above terms and conditions, the license granted here is
+ perpetual (for the duration of the applicable copyright in the Work).
+ Notwithstanding the above, Licensor reserves the right to release the
+ Work under different license terms or to stop distributing the Work at
+ any time; provided, however that any such election will not serve to
+ withdraw this License (or any other license that has been, or is
+ required to be, granted under the terms of this License), and this
+ License will continue in full force and effect unless terminated as
+ stated above.
+
+8. Miscellaneous
+
+ a. Each time You Distribute or Publicly Perform the Work or a Collection,
+ the Licensor offers to the recipient a license to the Work on the same
+ terms and conditions as the license granted to You under this License.
+ b. Each time You Distribute or Publicly Perform an Adaptation, Licensor
+ offers to the recipient a license to the original Work on the same
+ terms and conditions as the license granted to You under this License.
+ c. If any provision of this License is invalid or unenforceable under
+ applicable law, it shall not affect the validity or enforceability of
+ the remainder of the terms of this License, and without further action
+ by the parties to this agreement, such provision shall be reformed to
+ the minimum extent necessary to make such provision valid and
+ enforceable.
+ d. No term or provision of this License shall be deemed waived and no
+ breach consented to unless such waiver or consent shall be in writing
+ and signed by the party to be charged with such waiver or consent.
+ e. This License constitutes the entire agreement between the parties with
+ respect to the Work licensed here. There are no understandings,
+ agreements or representations with respect to the Work not specified
+ here. Licensor shall not be bound by any additional provisions that
+ may appear in any communication from You. This License may not be
+ modified without the mutual written agreement of the Licensor and You.
+ f. The rights granted under, and the subject matter referenced, in this
+ License were drafted utilizing the terminology of the Berne Convention
+ for the Protection of Literary and Artistic Works (as amended on
+ September 28, 1979), the Rome Convention of 1961, the WIPO Copyright
+ Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996
+ and the Universal Copyright Convention (as revised on July 24, 1971).
+ These rights and subject matter take effect in the relevant
+ jurisdiction in which the License terms are sought to be enforced
+ according to the corresponding provisions of the implementation of
+ those treaty provisions in the applicable national law. If the
+ standard suite of rights granted under applicable copyright law
+ includes additional rights not granted under this License, such
+ additional rights are deemed to be included in the License; this
+ License is not intended to restrict the license of any rights under
+ applicable law.
+
+
+Creative Commons Notice
+
+ Creative Commons is not a party to this License, and makes no warranty
+ whatsoever in connection with the Work. Creative Commons will not be
+ liable to You or any party on any legal theory for any damages
+ whatsoever, including without limitation any general, special,
+ incidental or consequential damages arising in connection to this
+ license. Notwithstanding the foregoing two (2) sentences, if Creative
+ Commons has expressly identified itself as the Licensor hereunder, it
+ shall have all rights and obligations of Licensor.
+
+ Except for the limited purpose of indicating to the public that the
+ Work is licensed under the CCPL, Creative Commons does not authorize
+ the use by either party of the trademark "Creative Commons" or any
+ related trademark or logo of Creative Commons without the prior
+ written consent of Creative Commons. Any permitted use will be in
+ compliance with Creative Commons' then-current trademark usage
+ guidelines, as may be published on its website or otherwise made
+ available upon request from time to time. For the avoidance of doubt,
+ this trademark restriction does not form part of this License.
+
+ Creative Commons may be contacted at http://creativecommons.org/.
diff --git a/vendor/github.com/google/go-github/github/activity.go b/vendor/github.com/google/go-github/github/activity.go
new file mode 100644
index 0000000..d719ebb
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/activity.go
@@ -0,0 +1,67 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+// ActivityService handles communication with the activity related
+// methods of the GitHub API.
+//
+// GitHub API docs: http://developer.github.com/v3/activity/
+type ActivityService service
+
+// FeedLink represents a link to a related resource.
+type FeedLink struct {
+ HRef *string `json:"href,omitempty"`
+ Type *string `json:"type,omitempty"`
+}
+
+// Feeds represents timeline resources in Atom format.
+type Feeds struct {
+ TimelineURL *string `json:"timeline_url,omitempty"`
+ UserURL *string `json:"user_url,omitempty"`
+ CurrentUserPublicURL *string `json:"current_user_public_url,omitempty"`
+ CurrentUserURL *string `json:"current_user_url,omitempty"`
+ CurrentUserActorURL *string `json:"current_user_actor_url,omitempty"`
+ CurrentUserOrganizationURL *string `json:"current_user_organization_url,omitempty"`
+ CurrentUserOrganizationURLs []string `json:"current_user_organization_urls,omitempty"`
+ Links *struct {
+ Timeline *FeedLink `json:"timeline,omitempty"`
+ User *FeedLink `json:"user,omitempty"`
+ CurrentUserPublic *FeedLink `json:"current_user_public,omitempty"`
+ CurrentUser *FeedLink `json:"current_user,omitempty"`
+ CurrentUserActor *FeedLink `json:"current_user_actor,omitempty"`
+ CurrentUserOrganization *FeedLink `json:"current_user_organization,omitempty"`
+ CurrentUserOrganizations []FeedLink `json:"current_user_organizations,omitempty"`
+ } `json:"_links,omitempty"`
+}
+
+// ListFeeds lists all the feeds available to the authenticated user.
+//
+// GitHub provides several timeline resources in Atom format:
+// Timeline: The GitHub global public timeline
+// User: The public timeline for any user, using URI template
+// Current user public: The public timeline for the authenticated user
+// Current user: The private timeline for the authenticated user
+// Current user actor: The private timeline for activity created by the
+// authenticated user
+// Current user organizations: The private timeline for the organizations
+// the authenticated user is a member of.
+//
+// Note: Private feeds are only returned when authenticating via Basic Auth
+// since current feed URIs use the older, non revocable auth tokens.
+func (s *ActivityService) ListFeeds() (*Feeds, *Response, error) {
+ req, err := s.client.NewRequest("GET", "feeds", nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ f := &Feeds{}
+ resp, err := s.client.Do(req, f)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return f, resp, nil
+}
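
A short usage sketch for `ListFeeds` (illustrative only, not part of the vendored file). With an unauthenticated client only the public timeline entries are populated; as the comment above notes, the private feeds require Basic Auth:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // nil http.Client: unauthenticated

	feeds, _, err := client.Activity.ListFeeds()
	if err != nil {
		log.Fatal(err)
	}
	if feeds.TimelineURL != nil {
		fmt.Println("global timeline:", *feeds.TimelineURL)
	}
	if feeds.CurrentUserURL == nil {
		fmt.Println("private feeds require Basic Auth")
	}
}
```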
diff --git a/vendor/github.com/google/go-github/github/activity_events.go b/vendor/github.com/google/go-github/github/activity_events.go
new file mode 100644
index 0000000..31c2ccf
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/activity_events.go
@@ -0,0 +1,301 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+// Event represents a GitHub event.
+type Event struct {
+ Type *string `json:"type,omitempty"`
+ Public *bool `json:"public"`
+ RawPayload *json.RawMessage `json:"payload,omitempty"`
+ Repo *Repository `json:"repo,omitempty"`
+ Actor *User `json:"actor,omitempty"`
+ Org *Organization `json:"org,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ ID *string `json:"id,omitempty"`
+}
+
+func (e Event) String() string {
+ return Stringify(e)
+}
+
+// Payload returns the parsed event payload. For recognized event types,
+// a value of the corresponding struct type will be returned.
+func (e *Event) Payload() (payload interface{}) {
+ switch *e.Type {
+ case "CommitCommentEvent":
+ payload = &CommitCommentEvent{}
+ case "CreateEvent":
+ payload = &CreateEvent{}
+ case "DeleteEvent":
+ payload = &DeleteEvent{}
+ case "DeploymentEvent":
+ payload = &DeploymentEvent{}
+ case "DeploymentStatusEvent":
+ payload = &DeploymentStatusEvent{}
+ case "ForkEvent":
+ payload = &ForkEvent{}
+ case "GollumEvent":
+ payload = &GollumEvent{}
+ case "IntegrationInstallationEvent":
+ payload = &IntegrationInstallationEvent{}
+ case "IntegrationInstallationRepositoriesEvent":
+ payload = &IntegrationInstallationRepositoriesEvent{}
+ case "IssueActivityEvent":
+ payload = &IssueActivityEvent{}
+ case "IssueCommentEvent":
+ payload = &IssueCommentEvent{}
+ case "IssuesEvent":
+ payload = &IssuesEvent{}
+ case "LabelEvent":
+ payload = &LabelEvent{}
+ case "MemberEvent":
+ payload = &MemberEvent{}
+ case "MembershipEvent":
+ payload = &MembershipEvent{}
+ case "MilestoneEvent":
+ payload = &MilestoneEvent{}
+ case "OrganizationEvent":
+ payload = &OrganizationEvent{}
+ case "PageBuildEvent":
+ payload = &PageBuildEvent{}
+ case "PingEvent":
+ payload = &PingEvent{}
+ case "PublicEvent":
+ payload = &PublicEvent{}
+ case "PullRequestEvent":
+ payload = &PullRequestEvent{}
+ case "PullRequestReviewEvent":
+ payload = &PullRequestReviewEvent{}
+ case "PullRequestReviewCommentEvent":
+ payload = &PullRequestReviewCommentEvent{}
+ case "PushEvent":
+ payload = &PushEvent{}
+ case "ReleaseEvent":
+ payload = &ReleaseEvent{}
+ case "RepositoryEvent":
+ payload = &RepositoryEvent{}
+ case "StatusEvent":
+ payload = &StatusEvent{}
+ case "TeamAddEvent":
+ payload = &TeamAddEvent{}
+ case "WatchEvent":
+ payload = &WatchEvent{}
+ }
+ if err := json.Unmarshal(*e.RawPayload, &payload); err != nil {
+ panic(err.Error())
+ }
+ return payload
+}
+
+// ListEvents drinks from the firehose of all public events across GitHub.
+//
+// GitHub API docs: http://developer.github.com/v3/activity/events/#list-public-events
+func (s *ActivityService) ListEvents(opt *ListOptions) ([]*Event, *Response, error) {
+ u, err := addOptions("events", opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ events := new([]*Event)
+ resp, err := s.client.Do(req, events)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *events, resp, err
+}
+
+// ListRepositoryEvents lists events for a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/activity/events/#list-repository-events
+func (s *ActivityService) ListRepositoryEvents(owner, repo string, opt *ListOptions) ([]*Event, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/events", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ events := new([]*Event)
+ resp, err := s.client.Do(req, events)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *events, resp, err
+}
+
+// ListIssueEventsForRepository lists issue events for a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/activity/events/#list-issue-events-for-a-repository
+func (s *ActivityService) ListIssueEventsForRepository(owner, repo string, opt *ListOptions) ([]*IssueEvent, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ events := new([]*IssueEvent)
+ resp, err := s.client.Do(req, events)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *events, resp, err
+}
+
+// ListEventsForRepoNetwork lists public events for a network of repositories.
+//
+// GitHub API docs: http://developer.github.com/v3/activity/events/#list-public-events-for-a-network-of-repositories
+func (s *ActivityService) ListEventsForRepoNetwork(owner, repo string, opt *ListOptions) ([]*Event, *Response, error) {
+ u := fmt.Sprintf("networks/%v/%v/events", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ events := new([]*Event)
+ resp, err := s.client.Do(req, events)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *events, resp, err
+}
+
+// ListEventsForOrganization lists public events for an organization.
+//
+// GitHub API docs: http://developer.github.com/v3/activity/events/#list-public-events-for-an-organization
+func (s *ActivityService) ListEventsForOrganization(org string, opt *ListOptions) ([]*Event, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/events", org)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ events := new([]*Event)
+ resp, err := s.client.Do(req, events)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *events, resp, err
+}
+
+// ListEventsPerformedByUser lists the events performed by a user. If publicOnly is
+// true, only public events will be returned.
+//
+// GitHub API docs: http://developer.github.com/v3/activity/events/#list-events-performed-by-a-user
+func (s *ActivityService) ListEventsPerformedByUser(user string, publicOnly bool, opt *ListOptions) ([]*Event, *Response, error) {
+ var u string
+ if publicOnly {
+ u = fmt.Sprintf("users/%v/events/public", user)
+ } else {
+ u = fmt.Sprintf("users/%v/events", user)
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ events := new([]*Event)
+ resp, err := s.client.Do(req, events)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *events, resp, err
+}
+
+// ListEventsReceivedByUser lists the events received by a user. If publicOnly is
+// true, only public events will be returned.
+//
+// GitHub API docs: http://developer.github.com/v3/activity/events/#list-events-that-a-user-has-received
+func (s *ActivityService) ListEventsReceivedByUser(user string, publicOnly bool, opt *ListOptions) ([]*Event, *Response, error) {
+ var u string
+ if publicOnly {
+ u = fmt.Sprintf("users/%v/received_events/public", user)
+ } else {
+ u = fmt.Sprintf("users/%v/received_events", user)
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ events := new([]*Event)
+ resp, err := s.client.Do(req, events)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *events, resp, err
+}
+
+// ListUserEventsForOrganization provides the user’s organization dashboard. You
+// must be authenticated as the user to view this.
+//
+// GitHub API docs: http://developer.github.com/v3/activity/events/#list-events-for-an-organization
+func (s *ActivityService) ListUserEventsForOrganization(org, user string, opt *ListOptions) ([]*Event, *Response, error) {
+ u := fmt.Sprintf("users/%v/events/orgs/%v", user, org)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ events := new([]*Event)
+ resp, err := s.client.Do(req, events)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *events, resp, err
+}
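
Because `Payload` returns an `interface{}` holding one of the structs registered in the switch above, callers typically recover the concrete type with a type switch. A sketch under the same assumptions as the previous example (public events need no authentication); it is not part of the vendored file:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil)

	events, _, err := client.Activity.ListEvents(&github.ListOptions{PerPage: 30})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range events {
		repo := ""
		if e.Repo != nil && e.Repo.Name != nil {
			repo = *e.Repo.Name
		}
		switch e.Payload().(type) {
		case *github.PushEvent:
			fmt.Println("push to", repo)
		case *github.WatchEvent:
			fmt.Println("star for", repo)
		default:
			// Unrecognized event types still unmarshal into a generic value.
		}
	}
}
```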
diff --git a/vendor/github.com/google/go-github/github/activity_notifications.go b/vendor/github.com/google/go-github/github/activity_notifications.go
new file mode 100644
index 0000000..b538a7b
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/activity_notifications.go
@@ -0,0 +1,222 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// Notification identifies a GitHub notification for a user.
+type Notification struct {
+ ID *string `json:"id,omitempty"`
+ Repository *Repository `json:"repository,omitempty"`
+ Subject *NotificationSubject `json:"subject,omitempty"`
+
+ // Reason identifies the event that triggered the notification.
+ //
+ // GitHub API Docs: https://developer.github.com/v3/activity/notifications/#notification-reasons
+ Reason *string `json:"reason,omitempty"`
+
+ Unread *bool `json:"unread,omitempty"`
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+ LastReadAt *time.Time `json:"last_read_at,omitempty"`
+ URL *string `json:"url,omitempty"`
+}
+
+// NotificationSubject identifies the subject of a notification.
+type NotificationSubject struct {
+ Title *string `json:"title,omitempty"`
+ URL *string `json:"url,omitempty"`
+ LatestCommentURL *string `json:"latest_comment_url,omitempty"`
+ Type *string `json:"type,omitempty"`
+}
+
+// NotificationListOptions specifies the optional parameters to the
+// ActivityService.ListNotifications method.
+type NotificationListOptions struct {
+ All bool `url:"all,omitempty"`
+ Participating bool `url:"participating,omitempty"`
+ Since time.Time `url:"since,omitempty"`
+ Before time.Time `url:"before,omitempty"`
+
+ ListOptions
+}
+
+// ListNotifications lists all notifications for the authenticated user.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications
+func (s *ActivityService) ListNotifications(opt *NotificationListOptions) ([]*Notification, *Response, error) {
+	u := "notifications"
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var notifications []*Notification
+	resp, err := s.client.Do(req, &notifications)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return notifications, resp, err
+}
+
+// ListRepositoryNotifications lists all notifications in a given repository
+// for the authenticated user.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications-in-a-repository
+func (s *ActivityService) ListRepositoryNotifications(owner, repo string, opt *NotificationListOptions) ([]*Notification, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var notifications []*Notification
+	resp, err := s.client.Do(req, &notifications)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return notifications, resp, err
+}
+
+type markReadOptions struct {
+ LastReadAt time.Time `json:"last_read_at,omitempty"`
+}
+
+// MarkNotificationsRead marks all notifications up to lastRead as read.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-as-read
+func (s *ActivityService) MarkNotificationsRead(lastRead time.Time) (*Response, error) {
+ opts := &markReadOptions{
+ LastReadAt: lastRead,
+ }
+ req, err := s.client.NewRequest("PUT", "notifications", opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// MarkRepositoryNotificationsRead marks all notifications up to lastRead in
+// the specified repository as read.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-notifications-as-read-in-a-repository
+func (s *ActivityService) MarkRepositoryNotificationsRead(owner, repo string, lastRead time.Time) (*Response, error) {
+ opts := &markReadOptions{
+ LastReadAt: lastRead,
+ }
+ u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo)
+ req, err := s.client.NewRequest("PUT", u, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// GetThread gets the specified notification thread.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#view-a-single-thread
+func (s *ActivityService) GetThread(id string) (*Notification, *Response, error) {
+ u := fmt.Sprintf("notifications/threads/%v", id)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ notification := new(Notification)
+ resp, err := s.client.Do(req, notification)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return notification, resp, err
+}
+
+// MarkThreadRead marks the specified thread as read.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-a-thread-as-read
+func (s *ActivityService) MarkThreadRead(id string) (*Response, error) {
+ u := fmt.Sprintf("notifications/threads/%v", id)
+
+ req, err := s.client.NewRequest("PATCH", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// GetThreadSubscription checks to see if the authenticated user is subscribed
+// to a thread.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#get-a-thread-subscription
+func (s *ActivityService) GetThreadSubscription(id string) (*Subscription, *Response, error) {
+ u := fmt.Sprintf("notifications/threads/%v/subscription", id)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sub := new(Subscription)
+ resp, err := s.client.Do(req, sub)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return sub, resp, err
+}
+
+// SetThreadSubscription sets the subscription for the specified thread for the
+// authenticated user.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#set-a-thread-subscription
+func (s *ActivityService) SetThreadSubscription(id string, subscription *Subscription) (*Subscription, *Response, error) {
+ u := fmt.Sprintf("notifications/threads/%v/subscription", id)
+
+ req, err := s.client.NewRequest("PUT", u, subscription)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sub := new(Subscription)
+ resp, err := s.client.Do(req, sub)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return sub, resp, err
+}
+
+// DeleteThreadSubscription deletes the subscription for the specified thread
+// for the authenticated user.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#delete-a-thread-subscription
+func (s *ActivityService) DeleteThreadSubscription(id string) (*Response, error) {
+ u := fmt.Sprintf("notifications/threads/%v/subscription", id)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
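
A hedged sketch of listing recent notifications and marking unread threads as read (not part of the vendored file). The nil client below is a placeholder; these endpoints need an authenticated `*http.Client`, e.g. one built with golang.org/x/oauth2:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // assumption: substitute an authenticated client

	opt := &github.NotificationListOptions{
		All:         true,
		Since:       time.Now().Add(-24 * time.Hour),
		ListOptions: github.ListOptions{PerPage: 50},
	}
	notifications, _, err := client.Activity.ListNotifications(opt)
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range notifications {
		if n.Subject != nil && n.Subject.Title != nil {
			fmt.Println(*n.Subject.Title)
		}
		if n.Unread != nil && *n.Unread && n.ID != nil {
			if _, err := client.Activity.MarkThreadRead(*n.ID); err != nil {
				log.Println("mark read:", err)
			}
		}
	}
}
```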
diff --git a/vendor/github.com/google/go-github/github/activity_star.go b/vendor/github.com/google/go-github/github/activity_star.go
new file mode 100644
index 0000000..5df6814
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/activity_star.go
@@ -0,0 +1,132 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// StarredRepository is returned by ListStarred.
+type StarredRepository struct {
+ StarredAt *Timestamp `json:"starred_at,omitempty"`
+ Repository *Repository `json:"repo,omitempty"`
+}
+
+// Stargazer represents a user that has starred a repository.
+type Stargazer struct {
+ StarredAt *Timestamp `json:"starred_at,omitempty"`
+ User *User `json:"user,omitempty"`
+}
+
+// ListStargazers lists people who have starred the specified repo.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/starring/#list-stargazers
+func (s *ActivityService) ListStargazers(owner, repo string, opt *ListOptions) ([]*Stargazer, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/stargazers", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeStarringPreview)
+
+ stargazers := new([]*Stargazer)
+ resp, err := s.client.Do(req, stargazers)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *stargazers, resp, err
+}
+
+// ActivityListStarredOptions specifies the optional parameters to the
+// ActivityService.ListStarred method.
+type ActivityListStarredOptions struct {
+ // How to sort the repository list. Possible values are: created, updated,
+ // pushed, full_name. Default is "full_name".
+ Sort string `url:"sort,omitempty"`
+
+ // Direction in which to sort repositories. Possible values are: asc, desc.
+ // Default is "asc" when sort is "full_name", otherwise default is "desc".
+ Direction string `url:"direction,omitempty"`
+
+ ListOptions
+}
+
+// ListStarred lists all the repos starred by a user. Passing the empty string
+// will list the starred repositories for the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/activity/starring/#list-repositories-being-starred
+func (s *ActivityService) ListStarred(user string, opt *ActivityListStarredOptions) ([]*StarredRepository, *Response, error) {
+ var u string
+ if user != "" {
+ u = fmt.Sprintf("users/%v/starred", user)
+ } else {
+ u = "user/starred"
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeStarringPreview)
+
+ repos := new([]*StarredRepository)
+ resp, err := s.client.Do(req, repos)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *repos, resp, err
+}
+
+// IsStarred checks if a repository is starred by authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/activity/starring/#check-if-you-are-starring-a-repository
+func (s *ActivityService) IsStarred(owner, repo string) (bool, *Response, error) {
+ u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return false, nil, err
+ }
+ resp, err := s.client.Do(req, nil)
+ starred, err := parseBoolResponse(err)
+ return starred, resp, err
+}
+
+// Star a repository as the authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/activity/starring/#star-a-repository
+func (s *ActivityService) Star(owner, repo string) (*Response, error) {
+ u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
+ req, err := s.client.NewRequest("PUT", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
+
+// Unstar a repository as the authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/activity/starring/#unstar-a-repository
+func (s *ActivityService) Unstar(owner, repo string) (*Response, error) {
+ u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
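
A short sketch of the starring calls (illustrative, not part of the vendored file). `IsStarred`, `Star` and `Unstar` all act on the authenticated user, so the nil client below stands in for one backed by credentials:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // assumption: substitute an authenticated client

	starred, _, err := client.Activity.IsStarred("google", "go-github")
	if err != nil {
		log.Fatal(err)
	}
	if !starred {
		if _, err := client.Activity.Star("google", "go-github"); err != nil {
			log.Fatal(err)
		}
		fmt.Println("starred google/go-github")
	}
}
```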
diff --git a/vendor/github.com/google/go-github/github/activity_watching.go b/vendor/github.com/google/go-github/github/activity_watching.go
new file mode 100644
index 0000000..9a27541
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/activity_watching.go
@@ -0,0 +1,143 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// Subscription identifies a repository or thread subscription.
+type Subscription struct {
+ Subscribed *bool `json:"subscribed,omitempty"`
+ Ignored *bool `json:"ignored,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ URL *string `json:"url,omitempty"`
+
+ // only populated for repository subscriptions
+ RepositoryURL *string `json:"repository_url,omitempty"`
+
+ // only populated for thread subscriptions
+ ThreadURL *string `json:"thread_url,omitempty"`
+}
+
+// ListWatchers lists watchers of a particular repo.
+//
+// GitHub API Docs: http://developer.github.com/v3/activity/watching/#list-watchers
+func (s *ActivityService) ListWatchers(owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/subscribers", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ watchers := new([]*User)
+ resp, err := s.client.Do(req, watchers)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *watchers, resp, err
+}
+
+// ListWatched lists the repositories the specified user is watching. Passing
+// the empty string will fetch watched repos for the authenticated user.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/watching/#list-repositories-being-watched
+func (s *ActivityService) ListWatched(user string, opt *ListOptions) ([]*Repository, *Response, error) {
+ var u string
+ if user != "" {
+ u = fmt.Sprintf("users/%v/subscriptions", user)
+ } else {
+ u = "user/subscriptions"
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ watched := new([]*Repository)
+ resp, err := s.client.Do(req, watched)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *watched, resp, err
+}
+
+// GetRepositorySubscription returns the subscription for the specified
+// repository for the authenticated user. If the authenticated user is not
+// watching the repository, a nil Subscription is returned.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/watching/#get-a-repository-subscription
+func (s *ActivityService) GetRepositorySubscription(owner, repo string) (*Subscription, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sub := new(Subscription)
+ resp, err := s.client.Do(req, sub)
+ if err != nil {
+ // if it's just a 404, don't return that as an error
+ _, err = parseBoolResponse(err)
+ return nil, resp, err
+ }
+
+ return sub, resp, err
+}
+
+// SetRepositorySubscription sets the subscription for the specified repository
+// for the authenticated user.
+//
+// To watch a repository, set subscription.Subscribed to true.
+// To ignore notifications made within a repository, set subscription.Ignored to true.
+// To stop watching a repository, use DeleteRepositorySubscription.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/watching/#set-a-repository-subscription
+func (s *ActivityService) SetRepositorySubscription(owner, repo string, subscription *Subscription) (*Subscription, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
+
+ req, err := s.client.NewRequest("PUT", u, subscription)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sub := new(Subscription)
+ resp, err := s.client.Do(req, sub)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return sub, resp, err
+}
+
+// DeleteRepositorySubscription deletes the subscription for the specified
+// repository for the authenticated user.
+//
+// This is used to stop watching a repository. To control whether or not to
+// receive notifications from a repository, use SetRepositorySubscription.
+//
+// GitHub API Docs: https://developer.github.com/v3/activity/watching/#delete-a-repository-subscription
+func (s *ActivityService) DeleteRepositorySubscription(owner, repo string) (*Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
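
As the `SetRepositorySubscription` comment explains, watching and ignoring are expressed on the `Subscription` value, while unwatching is a separate delete call. A hedged sketch (not part of the vendored file), again assuming an authenticated client in place of the nil placeholder:

```go
package main

import (
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // assumption: substitute an authenticated client

	// Watch the repository; setting Ignored instead would mute notifications,
	// and DeleteRepositorySubscription stops watching entirely.
	sub := &github.Subscription{Subscribed: github.Bool(true)}
	if _, _, err := client.Activity.SetRepositorySubscription("google", "go-github", sub); err != nil {
		log.Fatal(err)
	}
}
```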
diff --git a/vendor/github.com/google/go-github/github/admin.go b/vendor/github.com/google/go-github/github/admin.go
new file mode 100644
index 0000000..44d7a9f
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/admin.go
@@ -0,0 +1,98 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// AdminService handles communication with the admin related methods of the
+// GitHub API. These API routes are normally only accessible for GitHub
+// Enterprise installations.
+//
+// GitHub API docs: https://developer.github.com/v3/enterprise/
+type AdminService service
+
+// TeamLDAPMapping represents the mapping between a GitHub team and an LDAP group.
+type TeamLDAPMapping struct {
+ ID *int `json:"id,omitempty"`
+ LDAPDN *string `json:"ldap_dn,omitempty"`
+ URL *string `json:"url,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Slug *string `json:"slug,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Privacy *string `json:"privacy,omitempty"`
+ Permission *string `json:"permission,omitempty"`
+
+ MembersURL *string `json:"members_url,omitempty"`
+ RepositoriesURL *string `json:"repositories_url,omitempty"`
+}
+
+func (m TeamLDAPMapping) String() string {
+ return Stringify(m)
+}
+
+// UserLDAPMapping represents the mapping between a GitHub user and an LDAP user.
+type UserLDAPMapping struct {
+ ID *int `json:"id,omitempty"`
+ LDAPDN *string `json:"ldap_dn,omitempty"`
+ Login *string `json:"login,omitempty"`
+ AvatarURL *string `json:"avatar_url,omitempty"`
+ GravatarID *string `json:"gravatar_id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ SiteAdmin *bool `json:"site_admin,omitempty"`
+
+ URL *string `json:"url,omitempty"`
+ EventsURL *string `json:"events_url,omitempty"`
+ FollowingURL *string `json:"following_url,omitempty"`
+ FollowersURL *string `json:"followers_url,omitempty"`
+ GistsURL *string `json:"gists_url,omitempty"`
+ OrganizationsURL *string `json:"organizations_url,omitempty"`
+ ReceivedEventsURL *string `json:"received_events_url,omitempty"`
+ ReposURL *string `json:"repos_url,omitempty"`
+ StarredURL *string `json:"starred_url,omitempty"`
+ SubscriptionsURL *string `json:"subscriptions_url,omitempty"`
+}
+
+func (m UserLDAPMapping) String() string {
+ return Stringify(m)
+}
+
+// UpdateUserLDAPMapping updates the mapping between a GitHub user and an LDAP user.
+//
+// GitHub API docs: https://developer.github.com/v3/enterprise/ldap/#update-ldap-mapping-for-a-user
+func (s *AdminService) UpdateUserLDAPMapping(user string, mapping *UserLDAPMapping) (*UserLDAPMapping, *Response, error) {
+ u := fmt.Sprintf("admin/ldap/users/%v/mapping", user)
+ req, err := s.client.NewRequest("PATCH", u, mapping)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ m := new(UserLDAPMapping)
+ resp, err := s.client.Do(req, m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, err
+}
+
+// UpdateTeamLDAPMapping updates the mapping between a GitHub team and an LDAP group.
+//
+// GitHub API docs: https://developer.github.com/v3/enterprise/ldap/#update-ldap-mapping-for-a-team
+func (s *AdminService) UpdateTeamLDAPMapping(team int, mapping *TeamLDAPMapping) (*TeamLDAPMapping, *Response, error) {
+ u := fmt.Sprintf("admin/ldap/teams/%v/mapping", team)
+ req, err := s.client.NewRequest("PATCH", u, mapping)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ m := new(TeamLDAPMapping)
+ resp, err := s.client.Do(req, m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, err
+}
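
The admin LDAP endpoints only exist on GitHub Enterprise. A hedged sketch of updating a user mapping (not part of the vendored file); the client is assumed to carry a site-admin token and a BaseURL pointing at the Enterprise appliance, neither of which is shown here, and the DN is a hypothetical example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // assumption: configured for an Enterprise appliance

	mapping := &github.UserLDAPMapping{
		LDAPDN: github.String("uid=asmith,ou=users,dc=example,dc=com"),
	}
	m, _, err := client.Admin.UpdateUserLDAPMapping("asmith", mapping)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(m)
}
```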
diff --git a/vendor/github.com/google/go-github/github/authorizations.go b/vendor/github.com/google/go-github/github/authorizations.go
new file mode 100644
index 0000000..d5a5e63
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/authorizations.go
@@ -0,0 +1,427 @@
+// Copyright 2015 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// Scope models a GitHub authorization scope.
+//
+// GitHub API docs: https://developer.github.com/v3/oauth/#scopes
+type Scope string
+
+// This is the set of scopes for GitHub API V3
+const (
+ ScopeNone Scope = "(no scope)" // REVISIT: is this actually returned, or just a documentation artifact?
+ ScopeUser Scope = "user"
+ ScopeUserEmail Scope = "user:email"
+ ScopeUserFollow Scope = "user:follow"
+ ScopePublicRepo Scope = "public_repo"
+ ScopeRepo Scope = "repo"
+ ScopeRepoDeployment Scope = "repo_deployment"
+ ScopeRepoStatus Scope = "repo:status"
+ ScopeDeleteRepo Scope = "delete_repo"
+ ScopeNotifications Scope = "notifications"
+ ScopeGist Scope = "gist"
+ ScopeReadRepoHook Scope = "read:repo_hook"
+ ScopeWriteRepoHook Scope = "write:repo_hook"
+ ScopeAdminRepoHook Scope = "admin:repo_hook"
+ ScopeAdminOrgHook Scope = "admin:org_hook"
+ ScopeReadOrg Scope = "read:org"
+ ScopeWriteOrg Scope = "write:org"
+ ScopeAdminOrg Scope = "admin:org"
+ ScopeReadPublicKey Scope = "read:public_key"
+ ScopeWritePublicKey Scope = "write:public_key"
+ ScopeAdminPublicKey Scope = "admin:public_key"
+ ScopeReadGPGKey Scope = "read:gpg_key"
+ ScopeWriteGPGKey Scope = "write:gpg_key"
+ ScopeAdminGPGKey Scope = "admin:gpg_key"
+)
+
+// AuthorizationsService handles communication with the authorization related
+// methods of the GitHub API.
+//
+// This service requires HTTP Basic Authentication; it cannot be accessed using
+// an OAuth token.
+//
+// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/
+type AuthorizationsService service
+
+// Authorization represents an individual GitHub authorization.
+type Authorization struct {
+ ID *int `json:"id,omitempty"`
+ URL *string `json:"url,omitempty"`
+ Scopes []Scope `json:"scopes,omitempty"`
+ Token *string `json:"token,omitempty"`
+ TokenLastEight *string `json:"token_last_eight,omitempty"`
+ HashedToken *string `json:"hashed_token,omitempty"`
+ App *AuthorizationApp `json:"app,omitempty"`
+ Note *string `json:"note,omitempty"`
+ NoteURL *string `json:"note_url,omitempty"`
+ UpdateAt *Timestamp `json:"updated_at,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ Fingerprint *string `json:"fingerprint,omitempty"`
+
+ // User is only populated by the Check and Reset methods.
+ User *User `json:"user,omitempty"`
+}
+
+func (a Authorization) String() string {
+ return Stringify(a)
+}
+
+// AuthorizationApp represents an individual GitHub app (in the context of authorization).
+type AuthorizationApp struct {
+ URL *string `json:"url,omitempty"`
+ Name *string `json:"name,omitempty"`
+ ClientID *string `json:"client_id,omitempty"`
+}
+
+func (a AuthorizationApp) String() string {
+ return Stringify(a)
+}
+
+// Grant represents an OAuth application that has been granted access to an account.
+type Grant struct {
+ ID *int `json:"id,omitempty"`
+ URL *string `json:"url,omitempty"`
+ App *AuthorizationApp `json:"app,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+ Scopes []string `json:"scopes,omitempty"`
+}
+
+func (g Grant) String() string {
+ return Stringify(g)
+}
+
+// AuthorizationRequest represents a request to create an authorization.
+type AuthorizationRequest struct {
+ Scopes []Scope `json:"scopes,omitempty"`
+ Note *string `json:"note,omitempty"`
+ NoteURL *string `json:"note_url,omitempty"`
+ ClientID *string `json:"client_id,omitempty"`
+ ClientSecret *string `json:"client_secret,omitempty"`
+ Fingerprint *string `json:"fingerprint,omitempty"`
+}
+
+func (a AuthorizationRequest) String() string {
+ return Stringify(a)
+}
+
+// AuthorizationUpdateRequest represents a request to update an authorization.
+//
+// Note that for any one update, you must only provide one of the "scopes"
+// fields. That is, you may provide only one of "Scopes", or "AddScopes", or
+// "RemoveScopes".
+//
+// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#update-an-existing-authorization
+type AuthorizationUpdateRequest struct {
+ Scopes []string `json:"scopes,omitempty"`
+ AddScopes []string `json:"add_scopes,omitempty"`
+ RemoveScopes []string `json:"remove_scopes,omitempty"`
+ Note *string `json:"note,omitempty"`
+ NoteURL *string `json:"note_url,omitempty"`
+ Fingerprint *string `json:"fingerprint,omitempty"`
+}
+
+func (a AuthorizationUpdateRequest) String() string {
+ return Stringify(a)
+}
+
+// List the authorizations for the authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#list-your-authorizations
+func (s *AuthorizationsService) List(opt *ListOptions) ([]*Authorization, *Response, error) {
+ u := "authorizations"
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ auths := new([]*Authorization)
+ resp, err := s.client.Do(req, auths)
+ if err != nil {
+ return nil, resp, err
+ }
+ return *auths, resp, err
+}
+
+// Get a single authorization.
+//
+// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#get-a-single-authorization
+func (s *AuthorizationsService) Get(id int) (*Authorization, *Response, error) {
+ u := fmt.Sprintf("authorizations/%d", id)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ a := new(Authorization)
+ resp, err := s.client.Do(req, a)
+ if err != nil {
+ return nil, resp, err
+ }
+ return a, resp, err
+}
+
+// Create a new authorization for the specified OAuth application.
+//
+// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#create-a-new-authorization
+func (s *AuthorizationsService) Create(auth *AuthorizationRequest) (*Authorization, *Response, error) {
+ u := "authorizations"
+
+ req, err := s.client.NewRequest("POST", u, auth)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ a := new(Authorization)
+ resp, err := s.client.Do(req, a)
+ if err != nil {
+ return nil, resp, err
+ }
+ return a, resp, err
+}
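
For orientation, a minimal sketch of how the service above might be driven. It is not part of the vendored source; `BasicAuthTransport` is the helper referred to in this package's documentation, and the username, password, and note values are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	// The OAuth Authorizations API only accepts HTTP Basic Authentication,
	// so wire up a BasicAuthTransport (placeholder credentials).
	tp := github.BasicAuthTransport{
		Username: "octocat",
		Password: "password",
	}
	client := github.NewClient(tp.Client())

	// Ask for a token limited to the "repo" and "gist" scopes.
	req := &github.AuthorizationRequest{
		Scopes: []github.Scope{github.ScopeRepo, github.ScopeGist},
		Note:   github.String("example automation token"),
	}
	auth, _, err := client.Authorizations.Create(req)
	if err != nil {
		log.Fatal(err)
	}
	// Token is only returned when the authorization is first created.
	fmt.Println(*auth.Token)
}
```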
+
+// GetOrCreateForApp creates a new authorization for the specified OAuth
+// application, only if an authorization for that application doesn’t already
+// exist for the user.
+//
+// If a new token is created, the HTTP status code will be "201 Created", and
+// the returned Authorization.Token field will be populated. If an existing
+// token is returned, the status code will be "200 OK" and the
+// Authorization.Token field will be empty.
+//
+// clientID is the OAuth Client ID with which to create the token.
+//
+// GitHub API docs:
+// - https://developer.github.com/v3/oauth_authorizations/#get-or-create-an-authorization-for-a-specific-app
+// - https://developer.github.com/v3/oauth_authorizations/#get-or-create-an-authorization-for-a-specific-app-and-fingerprint
+func (s *AuthorizationsService) GetOrCreateForApp(clientID string, auth *AuthorizationRequest) (*Authorization, *Response, error) {
+ var u string
+ if auth.Fingerprint == nil || *auth.Fingerprint == "" {
+ u = fmt.Sprintf("authorizations/clients/%v", clientID)
+ } else {
+ u = fmt.Sprintf("authorizations/clients/%v/%v", clientID, *auth.Fingerprint)
+ }
+
+ req, err := s.client.NewRequest("PUT", u, auth)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ a := new(Authorization)
+ resp, err := s.client.Do(req, a)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return a, resp, err
+}
+
+// Edit a single authorization.
+//
+// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#update-an-existing-authorization
+func (s *AuthorizationsService) Edit(id int, auth *AuthorizationUpdateRequest) (*Authorization, *Response, error) {
+ u := fmt.Sprintf("authorizations/%d", id)
+
+ req, err := s.client.NewRequest("PATCH", u, auth)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ a := new(Authorization)
+ resp, err := s.client.Do(req, a)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return a, resp, err
+}
+
+// Delete a single authorization.
+//
+// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#delete-an-authorization
+func (s *AuthorizationsService) Delete(id int) (*Response, error) {
+ u := fmt.Sprintf("authorizations/%d", id)
+
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// Check if an OAuth token is valid for a specific app.
+//
+// Note that this operation requires the use of BasicAuth, but where the
+// username is the OAuth application clientID, and the password is its
+// clientSecret. Invalid tokens will return a 404 Not Found.
+//
+// The returned Authorization.User field will be populated.
+//
+// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#check-an-authorization
+func (s *AuthorizationsService) Check(clientID string, token string) (*Authorization, *Response, error) {
+ u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ a := new(Authorization)
+ resp, err := s.client.Do(req, a)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return a, resp, err
+}
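
A sketch, not from the vendored source, of the token-checking flow described above: the Basic Auth username is assumed to be the OAuth application's client ID and the password its client secret; all concrete values are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	clientID := "my-app-client-id"         // placeholder OAuth app client ID
	clientSecret := "my-app-client-secret" // placeholder OAuth app client secret

	tp := github.BasicAuthTransport{Username: clientID, Password: clientSecret}
	client := github.NewClient(tp.Client())

	// Check returns 404 Not Found (surfaced as an error) for invalid tokens.
	auth, _, err := client.Authorizations.Check(clientID, "token-to-verify")
	if err != nil {
		log.Fatal(err)
	}
	// User is populated by Check and Reset, per the Authorization struct docs.
	fmt.Println(*auth.User.Login)
}
```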
+
+// Reset is used to reset a valid OAuth token without end user involvement.
+// Applications must save the "token" property in the response, because changes
+// take effect immediately.
+//
+// Note that this operation requires the use of BasicAuth, but where the
+// username is the OAuth application clientID, and the password is its
+// clientSecret. Invalid tokens will return a 404 Not Found.
+//
+// The returned Authorization.User field will be populated.
+//
+// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#reset-an-authorization
+func (s *AuthorizationsService) Reset(clientID string, token string) (*Authorization, *Response, error) {
+ u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token)
+
+ req, err := s.client.NewRequest("POST", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ a := new(Authorization)
+ resp, err := s.client.Do(req, a)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return a, resp, err
+}
+
+// Revoke an authorization for an application.
+//
+// Note that this operation requires the use of BasicAuth, but where the
+// username is the OAuth application clientID, and the password is its
+// clientSecret. Invalid tokens will return a 404 Not Found.
+//
+// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#revoke-an-authorization-for-an-application
+func (s *AuthorizationsService) Revoke(clientID string, token string) (*Response, error) {
+ u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token)
+
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// ListGrants lists the set of OAuth applications that have been granted
+// access to a user's account. This will return one entry for each application
+// that has been granted access to the account, regardless of the number of
+// tokens an application has generated for the user.
+//
+// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#list-your-grants
+func (s *AuthorizationsService) ListGrants() ([]*Grant, *Response, error) {
+ req, err := s.client.NewRequest("GET", "applications/grants", nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ grants := []*Grant{}
+ resp, err := s.client.Do(req, &grants)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return grants, resp, err
+}
+
+// GetGrant gets a single OAuth application grant.
+//
+// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#get-a-single-grant
+func (s *AuthorizationsService) GetGrant(id int) (*Grant, *Response, error) {
+ u := fmt.Sprintf("applications/grants/%d", id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ grant := new(Grant)
+ resp, err := s.client.Do(req, grant)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return grant, resp, err
+}
+
+// DeleteGrant deletes an OAuth application grant. Deleting an application's
+// grant will also delete all OAuth tokens associated with the application for
+// the user.
+//
+// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#delete-a-grant
+func (s *AuthorizationsService) DeleteGrant(id int) (*Response, error) {
+ u := fmt.Sprintf("applications/grants/%d", id)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// CreateImpersonation creates an impersonation OAuth token.
+//
+// This requires admin permissions. With the returned Authorization.Token
+// you can e.g. create or delete a user's public SSH key. NOTE: creating a
+// new token automatically revokes an existing one.
+//
+// GitHub API docs: https://developer.github.com/enterprise/2.5/v3/users/administration/#create-an-impersonation-oauth-token
+func (s *AuthorizationsService) CreateImpersonation(username string, authReq *AuthorizationRequest) (*Authorization, *Response, error) {
+ u := fmt.Sprintf("admin/users/%v/authorizations", username)
+ req, err := s.client.NewRequest("POST", u, authReq)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ a := new(Authorization)
+ resp, err := s.client.Do(req, a)
+ if err != nil {
+ return nil, resp, err
+ }
+ return a, resp, err
+}
+
+// DeleteImpersonation deletes an impersonation OAuth token.
+//
+// NOTE: there can be only one at a time.
+//
+// GitHub API docs: https://developer.github.com/enterprise/2.5/v3/users/administration/#delete-an-impersonation-oauth-token
+func (s *AuthorizationsService) DeleteImpersonation(username string) (*Response, error) {
+ u := fmt.Sprintf("admin/users/%v/authorizations", username)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/google/go-github/github/doc.go b/vendor/github.com/google/go-github/github/doc.go
new file mode 100644
index 0000000..659dd82
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/doc.go
@@ -0,0 +1,160 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package github provides a client for using the GitHub API.
+
+Usage:
+
+ import "github.com/google/go-github/github"
+
+Construct a new GitHub client, then use the various services on the client to
+access different parts of the GitHub API. For example:
+
+ client := github.NewClient(nil)
+
+ // list all organizations for user "willnorris"
+ orgs, _, err := client.Organizations.List("willnorris", nil)
+
+Some API methods have optional parameters that can be passed. For example:
+
+ client := github.NewClient(nil)
+
+ // list public repositories for org "github"
+ opt := &github.RepositoryListByOrgOptions{Type: "public"}
+ repos, _, err := client.Repositories.ListByOrg("github", opt)
+
+The services of a client divide the API into logical chunks and correspond to
+the structure of the GitHub API documentation at
+http://developer.github.com/v3/.
+
+Authentication
+
+The go-github library does not directly handle authentication. Instead, when
+creating a new client, pass an http.Client that can handle authentication for
+you. The easiest and recommended way to do this is using the golang.org/x/oauth2
+library, but you can always use any other library that provides an http.Client.
+If you have an OAuth2 access token (for example, a personal API token), you can
+use it with the oauth2 library using:
+
+ import "golang.org/x/oauth2"
+
+ func main() {
+ ts := oauth2.StaticTokenSource(
+ &oauth2.Token{AccessToken: "... your access token ..."},
+ )
+ tc := oauth2.NewClient(oauth2.NoContext, ts)
+
+ client := github.NewClient(tc)
+
+ // list all repositories for the authenticated user
+ repos, _, err := client.Repositories.List("", nil)
+ }
+
+Note that when using an authenticated Client, all calls made by the client will
+include the specified OAuth token. Therefore, authenticated clients should
+almost never be shared between different users.
+
+See the oauth2 docs for complete instructions on using that library.
+
+For API methods that require HTTP Basic Authentication, use the
+BasicAuthTransport.
+
+Rate Limiting
+
+GitHub imposes a rate limit on all API clients. Unauthenticated clients are
+limited to 60 requests per hour, while authenticated clients can make up to
+5,000 requests per hour. To receive the higher rate limit when making calls
+that are not issued on behalf of a user, use the
+UnauthenticatedRateLimitedTransport.
+
+The Rate method on a client returns the rate limit information based on the most
+recent API call. This is updated on every call, but may be out of date if it's
+been some time since the last API call and other clients have made subsequent
+requests since then. You can always call RateLimits() directly to get the most
+up-to-date rate limit data for the client.
+
+To detect an API rate limit error, you can check if its type is *github.RateLimitError:
+
+ repos, _, err := client.Repositories.List("", nil)
+ if _, ok := err.(*github.RateLimitError); ok {
+ log.Println("hit rate limit")
+ }
+
+Learn more about GitHub rate limiting at
+http://developer.github.com/v3/#rate-limiting.
+
+Accepted Status
+
+Some endpoints may return a 202 Accepted status code, meaning that the
+information required is not yet ready and was scheduled to be gathered on
+the GitHub side. Methods known to behave like this are documented specifying
+this behavior.
+
+To detect this condition of error, you can check if its type is
+*github.AcceptedError:
+
+ stats, _, err := client.Repositories.ListContributorsStats(org, repo)
+ if _, ok := err.(*github.AcceptedError); ok {
+ log.Println("scheduled on GitHub side")
+ }
+
+Conditional Requests
+
+The GitHub API has good support for conditional requests which will help
+prevent you from burning through your rate limit, as well as help speed up your
+application. go-github does not handle conditional requests directly, but is
+instead designed to work with a caching http.Transport. We recommend using
+https://github.com/gregjones/httpcache for that.
+
+Learn more about GitHub conditional requests at
+https://developer.github.com/v3/#conditional-requests.
+
+Creating and Updating Resources
+
+All structs for GitHub resources use pointer values for all non-repeated fields.
+This allows distinguishing between unset fields and those set to a zero-value.
+Helper functions have been provided to easily create these pointers for string,
+bool, and int values. For example:
+
+ // create a new private repository named "foo"
+ repo := &github.Repository{
+ Name: github.String("foo"),
+ Private: github.Bool(true),
+ }
+ client.Repositories.Create("", repo)
+
+Users who have worked with protocol buffers should find this pattern familiar.
+
+Pagination
+
+All requests for resource collections (repos, pull requests, issues, etc.)
+support pagination. Pagination options are described in the
+github.ListOptions struct and passed to the list methods directly or as an
+embedded type of a more specific list options struct (for example
+github.PullRequestListOptions). Pages information is available via the
+github.Response struct.
+
+ client := github.NewClient(nil)
+
+ opt := &github.RepositoryListByOrgOptions{
+ ListOptions: github.ListOptions{PerPage: 10},
+ }
+ // get all pages of results
+ var allRepos []*github.Repository
+ for {
+ repos, resp, err := client.Repositories.ListByOrg("github", opt)
+ if err != nil {
+ return err
+ }
+ allRepos = append(allRepos, repos...)
+ if resp.NextPage == 0 {
+ break
+ }
+ opt.ListOptions.Page = resp.NextPage
+ }
+
+*/
+package github
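
The Conditional Requests section above recommends a caching transport but stops short of showing one. The following sketch is not part of the vendored source and assumes the httpcache package it links to exposes `NewMemoryCacheTransport` and a `Client` helper.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
	"github.com/gregjones/httpcache"
)

func main() {
	// A memory-backed caching transport replays ETag / Last-Modified headers,
	// so repeated identical requests can be answered with 304 responses that
	// do not count against the rate limit.
	tp := httpcache.NewMemoryCacheTransport()
	client := github.NewClient(tp.Client())

	for i := 0; i < 2; i++ {
		repos, _, err := client.Repositories.List("octocat", nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(len(repos))
	}
}
```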
diff --git a/vendor/github.com/google/go-github/github/event_types.go b/vendor/github.com/google/go-github/github/event_types.go
new file mode 100644
index 0000000..d4bb5e4
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/event_types.go
@@ -0,0 +1,609 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// These event types are shared between the Events API and used as Webhook payloads.
+
+package github
+
+// CommitCommentEvent is triggered when a commit comment is created.
+// The Webhook event name is "commit_comment".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#commitcommentevent
+type CommitCommentEvent struct {
+ Comment *RepositoryComment `json:"comment,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Action *string `json:"action,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// CreateEvent represents a created repository, branch, or tag.
+// The Webhook event name is "create".
+//
+// Note: webhooks will not receive this event for created repositories.
+// Additionally, webhooks will not receive this event for tags if more
+// than three tags are pushed at once.
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#createevent
+type CreateEvent struct {
+ Ref *string `json:"ref,omitempty"`
+ // RefType is the object that was created. Possible values are: "repository", "branch", "tag".
+ RefType *string `json:"ref_type,omitempty"`
+ MasterBranch *string `json:"master_branch,omitempty"`
+ Description *string `json:"description,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ PusherType *string `json:"pusher_type,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// DeleteEvent represents a deleted branch or tag.
+// The Webhook event name is "delete".
+//
+// Note: webhooks will not receive this event for tags if more than three tags
+// are deleted at once.
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#deleteevent
+type DeleteEvent struct {
+ Ref *string `json:"ref,omitempty"`
+ // RefType is the object that was deleted. Possible values are: "branch", "tag".
+ RefType *string `json:"ref_type,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ PusherType *string `json:"pusher_type,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// DeploymentEvent represents a deployment.
+// The Webhook event name is "deployment".
+//
+// Events of this type are not visible in timelines, they are only used to trigger hooks.
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#deploymentevent
+type DeploymentEvent struct {
+ Deployment *Deployment `json:"deployment,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// DeploymentStatusEvent represents a deployment status.
+// The Webhook event name is "deployment_status".
+//
+// Events of this type are not visible in timelines, they are only used to trigger hooks.
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#deploymentstatusevent
+type DeploymentStatusEvent struct {
+ Deployment *Deployment `json:"deployment,omitempty"`
+ DeploymentStatus *DeploymentStatus `json:"deployment_status,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// ForkEvent is triggered when a user forks a repository.
+// The Webhook event name is "fork".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#forkevent
+type ForkEvent struct {
+ // Forkee is the created repository.
+ Forkee *Repository `json:"forkee,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// Page represents a single Wiki page.
+type Page struct {
+ PageName *string `json:"page_name,omitempty"`
+ Title *string `json:"title,omitempty"`
+ Summary *string `json:"summary,omitempty"`
+ Action *string `json:"action,omitempty"`
+ SHA *string `json:"sha,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+}
+
+// GollumEvent is triggered when a Wiki page is created or updated.
+// The Webhook event name is "gollum".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#gollumevent
+type GollumEvent struct {
+ Pages []*Page `json:"pages,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// IssueActivityEvent represents the payload delivered by Issue webhook.
+//
+// Deprecated: Use IssuesEvent instead.
+type IssueActivityEvent struct {
+ Action *string `json:"action,omitempty"`
+ Issue *Issue `json:"issue,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// EditChange represents the changes when an issue, pull request, or comment has
+// been edited.
+type EditChange struct {
+ Title *struct {
+ From *string `json:"from,omitempty"`
+ } `json:"title,omitempty"`
+ Body *struct {
+ From *string `json:"from,omitempty"`
+ } `json:"body,omitempty"`
+}
+
+// IntegrationInstallationEvent is triggered when an integration is created or deleted.
+// The Webhook event name is "integration_installation".
+//
+// GitHub docs: https://developer.github.com/early-access/integrations/webhooks/#integrationinstallationevent
+type IntegrationInstallationEvent struct {
+ // The action that was performed. Possible values for an "integration_installation"
+ // event are: "created", "deleted".
+ Action *string `json:"action,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// IntegrationInstallationRepositoriesEvent is triggered when an integration repository
+// is added or removed. The Webhook event name is "integration_installation_repositories".
+//
+// GitHub docs: https://developer.github.com/early-access/integrations/webhooks/#integrationinstallationrepositoriesevent
+type IntegrationInstallationRepositoriesEvent struct {
+ // The action that was performed. Possible values for an "integration_installation_repositories"
+ // event are: "added", "removed".
+ Action *string `json:"action,omitempty"`
+ RepositoriesAdded []*Repository `json:"repositories_added,omitempty"`
+ RepositoriesRemoved []*Repository `json:"repositories_removed,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// IssueCommentEvent is triggered when an issue comment is created on an issue
+// or pull request.
+// The Webhook event name is "issue_comment".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#issuecommentevent
+type IssueCommentEvent struct {
+ // Action is the action that was performed on the comment.
+ // Possible values are: "created", "edited", "deleted".
+ Action *string `json:"action,omitempty"`
+ Issue *Issue `json:"issue,omitempty"`
+ Comment *IssueComment `json:"comment,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Changes *EditChange `json:"changes,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// IssuesEvent is triggered when an issue is assigned, unassigned, labeled,
+// unlabeled, opened, closed, or reopened.
+// The Webhook event name is "issues".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#issuesevent
+type IssuesEvent struct {
+ // Action is the action that was performed. Possible values are: "assigned",
+ // "unassigned", "labeled", "unlabeled", "opened", "closed", "reopened", "edited".
+ Action *string `json:"action,omitempty"`
+ Issue *Issue `json:"issue,omitempty"`
+ Assignee *User `json:"assignee,omitempty"`
+ Label *Label `json:"label,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Changes *EditChange `json:"changes,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// LabelEvent is triggered when a repository's label is created, edited, or deleted.
+// The Webhook event name is "label"
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#labelevent
+type LabelEvent struct {
+ // Action is the action that was performed. Possible values are:
+ // "created", "edited", "deleted"
+ Action *string `json:"action,omitempty"`
+ Label *Label `json:"label,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Changes *EditChange `json:"changes,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Org *Organization `json:"organization,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// MemberEvent is triggered when a user is added as a collaborator to a repository.
+// The Webhook event name is "member".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#memberevent
+type MemberEvent struct {
+ // Action is the action that was performed. Possible value is: "added".
+ Action *string `json:"action,omitempty"`
+ Member *User `json:"member,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// MembershipEvent is triggered when a user is added or removed from a team.
+// The Webhook event name is "membership".
+//
+// Events of this type are not visible in timelines, they are only used to
+// trigger organization webhooks.
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#membershipevent
+type MembershipEvent struct {
+ // Action is the action that was performed. Possible values are: "added", "removed".
+ Action *string `json:"action,omitempty"`
+ // Scope is the scope of the membership. Possible value is: "team".
+ Scope *string `json:"scope,omitempty"`
+ Member *User `json:"member,omitempty"`
+ Team *Team `json:"team,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Org *Organization `json:"organization,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// MilestoneEvent is triggered when a milestone is created, closed, opened, edited, or deleted.
+// The Webhook event name is "milestone".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#milestoneevent
+type MilestoneEvent struct {
+ // Action is the action that was performed. Possible values are:
+ // "created", "closed", "opened", "edited", "deleted"
+ Action *string `json:"action,omitempty"`
+ Milestone *Milestone `json:"milestone,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Changes *EditChange `json:"changes,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Org *Organization `json:"organization,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// OrganizationEvent is triggered when a user is added, removed, or invited to an organization.
+// Events of this type are not visible in timelines. These events are only used to trigger organization hooks.
+// Webhook event name is "organization".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#organizationevent
+type OrganizationEvent struct {
+ // Action is the action that was performed.
+ // Can be one of "member_added", "member_removed", or "member_invited".
+ Action *string `json:"action,omitempty"`
+
+	// Invitation is the invitation for the user or email if the action is "member_invited".
+ Invitation *Invitation `json:"invitation,omitempty"`
+
+ // Membership is the membership between the user and the organization.
+ // Not present when the action is "member_invited".
+ Membership *Membership `json:"membership,omitempty"`
+
+ Organization *Organization `json:"organization,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// PageBuildEvent represents an attempted build of a GitHub Pages site, whether
+// successful or not.
+// The Webhook event name is "page_build".
+//
+// This event is triggered on push to a GitHub Pages enabled branch (gh-pages
+// for project pages, master for user and organization pages).
+//
+// Events of this type are not visible in timelines, they are only used to trigger hooks.
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#pagebuildevent
+type PageBuildEvent struct {
+ Build *PagesBuild `json:"build,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ ID *int `json:"id,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// PingEvent is triggered when a Webhook is added to GitHub.
+//
+// GitHub docs: https://developer.github.com/webhooks/#ping-event
+type PingEvent struct {
+ // Random string of GitHub zen.
+ Zen *string `json:"zen,omitempty"`
+ // The ID of the webhook that triggered the ping.
+ HookID *int `json:"hook_id,omitempty"`
+ // The webhook configuration.
+ Hook *Hook `json:"hook,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// PublicEvent is triggered when a private repository is open sourced.
+// According to GitHub: "Without a doubt: the best GitHub event."
+// The Webhook event name is "public".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#publicevent
+type PublicEvent struct {
+ // The following fields are only populated by Webhook events.
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// PullRequestEvent is triggered when a pull request is assigned, unassigned,
+// labeled, unlabeled, opened, closed, reopened, or synchronized.
+// The Webhook event name is "pull_request".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#pullrequestevent
+type PullRequestEvent struct {
+ // Action is the action that was performed. Possible values are: "assigned",
+	// "unassigned", "labeled", "unlabeled", "opened", "closed", "reopened",
+	// "synchronize", or "edited". If the action is "closed" and the merged key is false,
+ // the pull request was closed with unmerged commits. If the action is "closed"
+ // and the merged key is true, the pull request was merged.
+ Action *string `json:"action,omitempty"`
+ Number *int `json:"number,omitempty"`
+ PullRequest *PullRequest `json:"pull_request,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Changes *EditChange `json:"changes,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// PullRequestReviewEvent is triggered when a review is submitted on a pull
+// request.
+// The Webhook event name is "pull_request_review".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#pullrequestreviewevent
+type PullRequestReviewEvent struct {
+ // Action is always "submitted".
+ Action *string `json:"action,omitempty"`
+ Review *PullRequestReview `json:"review,omitempty"`
+ PullRequest *PullRequest `json:"pull_request,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+
+ // The following field is only present when the webhook is triggered on
+ // a repository belonging to an organization.
+ Organization *Organization `json:"organization,omitempty"`
+}
+
+// PullRequestReviewCommentEvent is triggered when a comment is created on a
+// portion of the unified diff of a pull request.
+// The Webhook event name is "pull_request_review_comment".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent
+type PullRequestReviewCommentEvent struct {
+ // Action is the action that was performed on the comment.
+ // Possible values are: "created", "edited", "deleted".
+ Action *string `json:"action,omitempty"`
+ PullRequest *PullRequest `json:"pull_request,omitempty"`
+ Comment *PullRequestComment `json:"comment,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Changes *EditChange `json:"changes,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// PushEvent represents a git push to a GitHub repository.
+//
+// GitHub API docs: http://developer.github.com/v3/activity/events/types/#pushevent
+type PushEvent struct {
+ PushID *int `json:"push_id,omitempty"`
+ Head *string `json:"head,omitempty"`
+ Ref *string `json:"ref,omitempty"`
+ Size *int `json:"size,omitempty"`
+ Commits []PushEventCommit `json:"commits,omitempty"`
+ Repo *PushEventRepository `json:"repository,omitempty"`
+ Before *string `json:"before,omitempty"`
+ DistinctSize *int `json:"distinct_size,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ After *string `json:"after,omitempty"`
+ Created *bool `json:"created,omitempty"`
+ Deleted *bool `json:"deleted,omitempty"`
+ Forced *bool `json:"forced,omitempty"`
+ BaseRef *string `json:"base_ref,omitempty"`
+ Compare *string `json:"compare,omitempty"`
+ HeadCommit *PushEventCommit `json:"head_commit,omitempty"`
+ Pusher *User `json:"pusher,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+func (p PushEvent) String() string {
+ return Stringify(p)
+}
+
+// PushEventCommit represents a git commit in a GitHub PushEvent.
+type PushEventCommit struct {
+ Message *string `json:"message,omitempty"`
+ Author *CommitAuthor `json:"author,omitempty"`
+ URL *string `json:"url,omitempty"`
+ Distinct *bool `json:"distinct,omitempty"`
+
+ // The following fields are only populated by Events API.
+ SHA *string `json:"sha,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ ID *string `json:"id,omitempty"`
+ TreeID *string `json:"tree_id,omitempty"`
+ Timestamp *Timestamp `json:"timestamp,omitempty"`
+ Committer *CommitAuthor `json:"committer,omitempty"`
+ Added []string `json:"added,omitempty"`
+ Removed []string `json:"removed,omitempty"`
+ Modified []string `json:"modified,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+func (p PushEventCommit) String() string {
+ return Stringify(p)
+}
+
+// PushEventRepository represents the repo object in a PushEvent payload.
+type PushEventRepository struct {
+ ID *int `json:"id,omitempty"`
+ Name *string `json:"name,omitempty"`
+ FullName *string `json:"full_name,omitempty"`
+ Owner *PushEventRepoOwner `json:"owner,omitempty"`
+ Private *bool `json:"private,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Fork *bool `json:"fork,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ PushedAt *Timestamp `json:"pushed_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+ Homepage *string `json:"homepage,omitempty"`
+ Size *int `json:"size,omitempty"`
+ StargazersCount *int `json:"stargazers_count,omitempty"`
+ WatchersCount *int `json:"watchers_count,omitempty"`
+ Language *string `json:"language,omitempty"`
+ HasIssues *bool `json:"has_issues,omitempty"`
+ HasDownloads *bool `json:"has_downloads,omitempty"`
+ HasWiki *bool `json:"has_wiki,omitempty"`
+ HasPages *bool `json:"has_pages,omitempty"`
+ ForksCount *int `json:"forks_count,omitempty"`
+ OpenIssuesCount *int `json:"open_issues_count,omitempty"`
+ DefaultBranch *string `json:"default_branch,omitempty"`
+ MasterBranch *string `json:"master_branch,omitempty"`
+ Organization *string `json:"organization,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// PushEventRepoOwner is a basic representation of user/org in a PushEvent payload.
+type PushEventRepoOwner struct {
+ Name *string `json:"name,omitempty"`
+ Email *string `json:"email,omitempty"`
+}
+
+// ReleaseEvent is triggered when a release is published.
+// The Webhook event name is "release".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#releaseevent
+type ReleaseEvent struct {
+ // Action is the action that was performed. Possible value is: "published".
+ Action *string `json:"action,omitempty"`
+ Release *RepositoryRelease `json:"release,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// RepositoryEvent is triggered when a repository is created.
+// The Webhook event name is "repository".
+//
+// Events of this type are not visible in timelines, they are only used to
+// trigger organization webhooks.
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#repositoryevent
+type RepositoryEvent struct {
+ // Action is the action that was performed. Possible values are: "created", "deleted",
+ // "publicized", "privatized".
+ Action *string `json:"action,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Org *Organization `json:"organization,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// StatusEvent is triggered when the status of a Git commit changes.
+// The Webhook event name is "status".
+//
+// Events of this type are not visible in timelines, they are only used to
+// trigger hooks.
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#statusevent
+type StatusEvent struct {
+ SHA *string `json:"sha,omitempty"`
+ // State is the new state. Possible values are: "pending", "success", "failure", "error".
+ State *string `json:"state,omitempty"`
+ Description *string `json:"description,omitempty"`
+ TargetURL *string `json:"target_url,omitempty"`
+ Branches []*Branch `json:"branches,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ ID *int `json:"id,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Context *string `json:"context,omitempty"`
+ Commit *RepositoryCommit `json:"commit,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// TeamAddEvent is triggered when a repository is added to a team.
+// The Webhook event name is "team_add".
+//
+// Events of this type are not visible in timelines. These events are only used
+// to trigger hooks.
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#teamaddevent
+type TeamAddEvent struct {
+ Team *Team `json:"team,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Org *Organization `json:"organization,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// WatchEvent is related to starring a repository, not watching. See this API
+// blog post for an explanation: https://developer.github.com/changes/2012-09-05-watcher-api/
+//
+// The event’s actor is the user who starred a repository, and the event’s
+// repository is the repository that was starred.
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#watchevent
+type WatchEvent struct {
+ // Action is the action that was performed. Possible value is: "started".
+ Action *string `json:"action,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
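
These types double as webhook payload targets. A small sketch, not part of the vendored source, of decoding a heavily trimmed, made-up "push" payload into the PushEvent type above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	// A trimmed, fabricated payload; real webhook deliveries carry many more fields.
	payload := []byte(`{"ref":"refs/heads/master","commits":[{"message":"fix typo"}]}`)

	var event github.PushEvent
	if err := json.Unmarshal(payload, &event); err != nil {
		log.Fatal(err)
	}
	fmt.Println(*event.Ref, len(event.Commits)) // refs/heads/master 1
}
```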
diff --git a/vendor/github.com/google/go-github/github/gists.go b/vendor/github.com/google/go-github/github/gists.go
new file mode 100644
index 0000000..a3327f8
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/gists.go
@@ -0,0 +1,344 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// GistsService handles communication with the Gist related
+// methods of the GitHub API.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/
+type GistsService service
+
+// Gist represents a GitHub gist.
+type Gist struct {
+ ID *string `json:"id,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Public *bool `json:"public,omitempty"`
+ Owner *User `json:"owner,omitempty"`
+ Files map[GistFilename]GistFile `json:"files,omitempty"`
+ Comments *int `json:"comments,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ GitPullURL *string `json:"git_pull_url,omitempty"`
+ GitPushURL *string `json:"git_push_url,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+}
+
+func (g Gist) String() string {
+ return Stringify(g)
+}
+
+// GistFilename represents filename on a gist.
+type GistFilename string
+
+// GistFile represents a file on a gist.
+type GistFile struct {
+ Size *int `json:"size,omitempty"`
+ Filename *string `json:"filename,omitempty"`
+ Type *string `json:"type,omitempty"`
+ RawURL *string `json:"raw_url,omitempty"`
+ Content *string `json:"content,omitempty"`
+}
+
+func (g GistFile) String() string {
+ return Stringify(g)
+}
+
+// GistCommit represents a commit on a gist.
+type GistCommit struct {
+ URL *string `json:"url,omitempty"`
+ Version *string `json:"version,omitempty"`
+ User *User `json:"user,omitempty"`
+ ChangeStatus *CommitStats `json:"change_status,omitempty"`
+ CommitedAt *Timestamp `json:"commited_at,omitempty"`
+}
+
+func (gc GistCommit) String() string {
+ return Stringify(gc)
+}
+
+// GistFork represents a fork of a gist.
+type GistFork struct {
+ URL *string `json:"url,omitempty"`
+ User *User `json:"user,omitempty"`
+ ID *string `json:"id,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+}
+
+func (gf GistFork) String() string {
+ return Stringify(gf)
+}
+
+// GistListOptions specifies the optional parameters to the
+// GistsService.List, GistsService.ListAll, and GistsService.ListStarred methods.
+type GistListOptions struct {
+ // Since filters Gists by time.
+ Since time.Time `url:"since,omitempty"`
+
+ ListOptions
+}
+
+// List gists for a user. Passing the empty string will list
+// all public gists if called anonymously. However, if the call
+// is authenticated, it will return all gists for the authenticated
+// user.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/#list-gists
+func (s *GistsService) List(user string, opt *GistListOptions) ([]*Gist, *Response, error) {
+ var u string
+ if user != "" {
+ u = fmt.Sprintf("users/%v/gists", user)
+ } else {
+ u = "gists"
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ gists := new([]*Gist)
+ resp, err := s.client.Do(req, gists)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *gists, resp, err
+}
+
+// ListAll lists all public gists.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/#list-gists
+func (s *GistsService) ListAll(opt *GistListOptions) ([]*Gist, *Response, error) {
+ u, err := addOptions("gists/public", opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ gists := new([]*Gist)
+ resp, err := s.client.Do(req, gists)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *gists, resp, err
+}
+
+// ListStarred lists starred gists of the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/#list-gists
+func (s *GistsService) ListStarred(opt *GistListOptions) ([]*Gist, *Response, error) {
+ u, err := addOptions("gists/starred", opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ gists := new([]*Gist)
+ resp, err := s.client.Do(req, gists)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *gists, resp, err
+}
+
+// Get a single gist.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/#get-a-single-gist
+func (s *GistsService) Get(id string) (*Gist, *Response, error) {
+ u := fmt.Sprintf("gists/%v", id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ gist := new(Gist)
+ resp, err := s.client.Do(req, gist)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return gist, resp, err
+}
+
+// GetRevision gets a specific revision of a gist.
+//
+// GitHub API docs: https://developer.github.com/v3/gists/#get-a-specific-revision-of-a-gist
+func (s *GistsService) GetRevision(id, sha string) (*Gist, *Response, error) {
+ u := fmt.Sprintf("gists/%v/%v", id, sha)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ gist := new(Gist)
+ resp, err := s.client.Do(req, gist)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return gist, resp, err
+}
+
+// Create a gist for the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/#create-a-gist
+func (s *GistsService) Create(gist *Gist) (*Gist, *Response, error) {
+ u := "gists"
+ req, err := s.client.NewRequest("POST", u, gist)
+ if err != nil {
+ return nil, nil, err
+ }
+ g := new(Gist)
+ resp, err := s.client.Do(req, g)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return g, resp, err
+}
+
+// Edit a gist.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/#edit-a-gist
+func (s *GistsService) Edit(id string, gist *Gist) (*Gist, *Response, error) {
+ u := fmt.Sprintf("gists/%v", id)
+ req, err := s.client.NewRequest("PATCH", u, gist)
+ if err != nil {
+ return nil, nil, err
+ }
+ g := new(Gist)
+ resp, err := s.client.Do(req, g)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return g, resp, err
+}
+
+// ListCommits lists commits of a gist.
+//
+// GitHub API docs: https://developer.github.com/v3/gists/#list-gist-commits
+func (s *GistsService) ListCommits(id string) ([]*GistCommit, *Response, error) {
+ u := fmt.Sprintf("gists/%v/commits", id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ gistCommits := new([]*GistCommit)
+ resp, err := s.client.Do(req, gistCommits)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *gistCommits, resp, err
+}
+
+// Delete a gist.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/#delete-a-gist
+func (s *GistsService) Delete(id string) (*Response, error) {
+ u := fmt.Sprintf("gists/%v", id)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
+
+// Star a gist on behalf of the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/#star-a-gist
+func (s *GistsService) Star(id string) (*Response, error) {
+ u := fmt.Sprintf("gists/%v/star", id)
+ req, err := s.client.NewRequest("PUT", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
+
+// Unstar a gist on behalf of the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/#unstar-a-gist
+func (s *GistsService) Unstar(id string) (*Response, error) {
+ u := fmt.Sprintf("gists/%v/star", id)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
+
+// IsStarred checks if a gist is starred by the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/#check-if-a-gist-is-starred
+func (s *GistsService) IsStarred(id string) (bool, *Response, error) {
+ u := fmt.Sprintf("gists/%v/star", id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return false, nil, err
+ }
+ resp, err := s.client.Do(req, nil)
+ starred, err := parseBoolResponse(err)
+ return starred, resp, err
+}
+
+// Fork a gist.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/#fork-a-gist
+func (s *GistsService) Fork(id string) (*Gist, *Response, error) {
+ u := fmt.Sprintf("gists/%v/forks", id)
+ req, err := s.client.NewRequest("POST", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ g := new(Gist)
+ resp, err := s.client.Do(req, g)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return g, resp, err
+}
+
+// ListForks lists forks of a gist.
+//
+// GitHub API docs: https://developer.github.com/v3/gists/#list-gist-forks
+func (s *GistsService) ListForks(id string) ([]*GistFork, *Response, error) {
+ u := fmt.Sprintf("gists/%v/forks", id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ gistForks := new([]*GistFork)
+ resp, err := s.client.Do(req, gistForks)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *gistForks, resp, err
+}
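
A usage sketch for the listing helpers above; it is not part of the vendored source, and the user name is a placeholder. GistListOptions embeds ListOptions, so paging and the since filter combine naturally.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // unauthenticated: public gists only

	opt := &github.GistListOptions{
		Since:       time.Now().Add(-7 * 24 * time.Hour), // gists updated in the last week
		ListOptions: github.ListOptions{PerPage: 30},
	}
	gists, _, err := client.Gists.List("octocat", opt)
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range gists {
		fmt.Println(*g.ID, *g.HTMLURL)
	}
}
```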
diff --git a/vendor/github.com/google/go-github/github/gists_comments.go b/vendor/github.com/google/go-github/github/gists_comments.go
new file mode 100644
index 0000000..95a7fc7
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/gists_comments.go
@@ -0,0 +1,118 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// GistComment represents a Gist comment.
+type GistComment struct {
+ ID *int `json:"id,omitempty"`
+ URL *string `json:"url,omitempty"`
+ Body *string `json:"body,omitempty"`
+ User *User `json:"user,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+}
+
+func (g GistComment) String() string {
+ return Stringify(g)
+}
+
+// ListComments lists all comments for a gist.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/comments/#list-comments-on-a-gist
+func (s *GistsService) ListComments(gistID string, opt *ListOptions) ([]*GistComment, *Response, error) {
+ u := fmt.Sprintf("gists/%v/comments", gistID)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ comments := new([]*GistComment)
+ resp, err := s.client.Do(req, comments)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *comments, resp, err
+}
+
+// GetComment retrieves a single comment from a gist.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/comments/#get-a-single-comment
+func (s *GistsService) GetComment(gistID string, commentID int) (*GistComment, *Response, error) {
+ u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c := new(GistComment)
+ resp, err := s.client.Do(req, c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
+
+// CreateComment creates a comment for a gist.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/comments/#create-a-comment
+func (s *GistsService) CreateComment(gistID string, comment *GistComment) (*GistComment, *Response, error) {
+ u := fmt.Sprintf("gists/%v/comments", gistID)
+ req, err := s.client.NewRequest("POST", u, comment)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c := new(GistComment)
+ resp, err := s.client.Do(req, c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
+
+// EditComment edits an existing gist comment.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/comments/#edit-a-comment
+func (s *GistsService) EditComment(gistID string, commentID int, comment *GistComment) (*GistComment, *Response, error) {
+ u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID)
+ req, err := s.client.NewRequest("PATCH", u, comment)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c := new(GistComment)
+ resp, err := s.client.Do(req, c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
+
+// DeleteComment deletes a gist comment.
+//
+// GitHub API docs: http://developer.github.com/v3/gists/comments/#delete-a-comment
+func (s *GistsService) DeleteComment(gistID string, commentID int) (*Response, error) {
+ u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
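
A sketch of the comment helpers, not part of the vendored source; the gist ID and body are placeholders, and authentication is assumed to already be configured on the http.Client handed to NewClient.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // in practice, pass an authenticated http.Client

	// Post a comment on a gist (placeholder gist ID).
	comment := &github.GistComment{Body: github.String("Nice snippet!")}
	created, _, err := client.Gists.CreateComment("aa5a315d61ae9438b18d", comment)
	if err != nil {
		log.Fatal(err)
	}

	// ...and remove it again using the ID the API assigned.
	if _, err := client.Gists.DeleteComment("aa5a315d61ae9438b18d", *created.ID); err != nil {
		log.Fatal(err)
	}
	fmt.Println("comment created and deleted")
}
```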
diff --git a/vendor/github.com/google/go-github/github/git.go b/vendor/github.com/google/go-github/github/git.go
new file mode 100644
index 0000000..c934751
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/git.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+// GitService handles communication with the git data related
+// methods of the GitHub API.
+//
+// GitHub API docs: http://developer.github.com/v3/git/
+type GitService service
diff --git a/vendor/github.com/google/go-github/github/git_blobs.go b/vendor/github.com/google/go-github/github/git_blobs.go
new file mode 100644
index 0000000..55148fd
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/git_blobs.go
@@ -0,0 +1,47 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// Blob represents a blob object.
+type Blob struct {
+ Content *string `json:"content,omitempty"`
+ Encoding *string `json:"encoding,omitempty"`
+ SHA *string `json:"sha,omitempty"`
+ Size *int `json:"size,omitempty"`
+ URL *string `json:"url,omitempty"`
+}
+
+// GetBlob fetches a blob from a repo given a SHA.
+//
+// GitHub API docs: http://developer.github.com/v3/git/blobs/#get-a-blob
+func (s *GitService) GetBlob(owner string, repo string, sha string) (*Blob, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/git/blobs/%v", owner, repo, sha)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ blob := new(Blob)
+ resp, err := s.client.Do(req, blob)
+ return blob, resp, err
+}
+
+// CreateBlob creates a blob object.
+//
+// GitHub API docs: https://developer.github.com/v3/git/blobs/#create-a-blob
+func (s *GitService) CreateBlob(owner string, repo string, blob *Blob) (*Blob, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/git/blobs", owner, repo)
+ req, err := s.client.NewRequest("POST", u, blob)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t := new(Blob)
+ resp, err := s.client.Do(req, t)
+ return t, resp, err
+}
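
A round-trip sketch for the blob helpers above, not part of the vendored source; owner, repo, and content are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // creating blobs requires an authenticated client

	// Create a blob from UTF-8 content (base64 is the other supported encoding).
	in := &github.Blob{
		Content:  github.String("Hello, world!\n"),
		Encoding: github.String("utf-8"),
	}
	created, _, err := client.Git.CreateBlob("octocat", "hello-world", in)
	if err != nil {
		log.Fatal(err)
	}

	// Fetch the same blob back by the SHA the API returned.
	out, _, err := client.Git.GetBlob("octocat", "hello-world", *created.SHA)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*out.Size)
}
```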
diff --git a/vendor/github.com/google/go-github/github/git_commits.go b/vendor/github.com/google/go-github/github/git_commits.go
new file mode 100644
index 0000000..0bcad41
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/git_commits.go
@@ -0,0 +1,127 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// SignatureVerification represents GPG signature verification.
+type SignatureVerification struct {
+ Verified *bool `json:"verified,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Signature *string `json:"signature,omitempty"`
+ Payload *string `json:"payload,omitempty"`
+}
+
+// Commit represents a GitHub commit.
+type Commit struct {
+ SHA *string `json:"sha,omitempty"`
+ Author *CommitAuthor `json:"author,omitempty"`
+ Committer *CommitAuthor `json:"committer,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Tree *Tree `json:"tree,omitempty"`
+ Parents []Commit `json:"parents,omitempty"`
+ Stats *CommitStats `json:"stats,omitempty"`
+ URL *string `json:"url,omitempty"`
+ Verification *SignatureVerification `json:"verification,omitempty"`
+
+ // CommentCount is the number of GitHub comments on the commit. This
+ // is only populated for requests that fetch GitHub data like
+ // Pulls.ListCommits, Repositories.ListCommits, etc.
+ CommentCount *int `json:"comment_count,omitempty"`
+}
+
+func (c Commit) String() string {
+ return Stringify(c)
+}
+
+// CommitAuthor represents the author or committer of a commit. The commit
+// author may not correspond to a GitHub User.
+type CommitAuthor struct {
+ Date *time.Time `json:"date,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Email *string `json:"email,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Login *string `json:"username,omitempty"` // Renamed for go-github consistency.
+}
+
+func (c CommitAuthor) String() string {
+ return Stringify(c)
+}
+
+// GetCommit fetches the Commit object for a given SHA.
+//
+// GitHub API docs: http://developer.github.com/v3/git/commits/#get-a-commit
+func (s *GitService) GetCommit(owner string, repo string, sha string) (*Commit, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/git/commits/%v", owner, repo, sha)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeGitSigningPreview)
+
+ c := new(Commit)
+ resp, err := s.client.Do(req, c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
+
+// createCommit represents the body of a CreateCommit request.
+type createCommit struct {
+ Author *CommitAuthor `json:"author,omitempty"`
+ Committer *CommitAuthor `json:"committer,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Tree *string `json:"tree,omitempty"`
+ Parents []string `json:"parents,omitempty"`
+}
+
+// CreateCommit creates a new commit in a repository.
+//
+// The commit.Committer is optional and will be filled with the commit.Author
+// data if omitted. If the commit.Author is omitted, it will be filled in with
+// the authenticated user’s information and the current date.
+//
+// GitHub API docs: http://developer.github.com/v3/git/commits/#create-a-commit
+func (s *GitService) CreateCommit(owner string, repo string, commit *Commit) (*Commit, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/git/commits", owner, repo)
+
+ body := &createCommit{}
+ if commit != nil {
+ parents := make([]string, len(commit.Parents))
+ for i, parent := range commit.Parents {
+ parents[i] = *parent.SHA
+ }
+
+ body = &createCommit{
+ Author: commit.Author,
+ Committer: commit.Committer,
+ Message: commit.Message,
+ Tree: commit.Tree.SHA,
+ Parents: parents,
+ }
+ }
+
+ req, err := s.client.NewRequest("POST", u, body)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c := new(Commit)
+ resp, err := s.client.Do(req, c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
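
For illustration, a minimal sketch of calling CreateCommit with this package; the owner, repository, and SHA values are placeholders, and in a real flow the tree and parent SHAs would come from Git.CreateTree and Git.GetRef:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // creating commits requires an authenticated client

	// Hypothetical SHAs; normally obtained from Git.CreateTree and Git.GetRef.
	treeSHA := "3d21ec53a331a6f037a91c368710b99387d012c1"
	parentSHA := "7638417db6d59f3c431d3e1f261cc637155684cd"

	date := time.Now()
	commit := &github.Commit{
		Author: &github.CommitAuthor{
			Date:  &date,
			Name:  github.String("Jane Doe"),
			Email: github.String("jane@example.com"),
		},
		Message: github.String("Update generated files"),
		Tree:    &github.Tree{SHA: github.String(treeSHA)},
		Parents: []github.Commit{{SHA: github.String(parentSHA)}},
	}

	created, _, err := client.Git.CreateCommit("example-owner", "example-repo", commit)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new commit SHA:", *created.SHA)
}
```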
diff --git a/vendor/github.com/google/go-github/github/git_refs.go b/vendor/github.com/google/go-github/github/git_refs.go
new file mode 100644
index 0000000..16cbd6b
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/git_refs.go
@@ -0,0 +1,162 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Reference represents a GitHub reference.
+type Reference struct {
+ Ref *string `json:"ref"`
+ URL *string `json:"url"`
+ Object *GitObject `json:"object"`
+}
+
+func (r Reference) String() string {
+ return Stringify(r)
+}
+
+// GitObject represents a Git object.
+type GitObject struct {
+ Type *string `json:"type"`
+ SHA *string `json:"sha"`
+ URL *string `json:"url"`
+}
+
+func (o GitObject) String() string {
+ return Stringify(o)
+}
+
+// createRefRequest represents the payload for creating a reference.
+type createRefRequest struct {
+ Ref *string `json:"ref"`
+ SHA *string `json:"sha"`
+}
+
+// updateRefRequest represents the payload for updating a reference.
+type updateRefRequest struct {
+ SHA *string `json:"sha"`
+ Force *bool `json:"force"`
+}
+
+// GetRef fetches the Reference object for a given Git ref.
+//
+// GitHub API docs: http://developer.github.com/v3/git/refs/#get-a-reference
+func (s *GitService) GetRef(owner string, repo string, ref string) (*Reference, *Response, error) {
+ ref = strings.TrimPrefix(ref, "refs/")
+ u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, ref)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ r := new(Reference)
+ resp, err := s.client.Do(req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r, resp, err
+}
+
+// ReferenceListOptions specifies optional parameters to the
+// GitService.ListRefs method.
+type ReferenceListOptions struct {
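+ // Type optionally restricts the listing to refs of the given type, for
+ // example "heads" or "tags". It is inserted into the URL path by ListRefs
+ // rather than sent as a query parameter.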
+ Type string `url:"-"`
+
+ ListOptions
+}
+
+// ListRefs lists all refs in a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/git/refs/#get-all-references
+func (s *GitService) ListRefs(owner, repo string, opt *ReferenceListOptions) ([]*Reference, *Response, error) {
+ var u string
+ if opt != nil && opt.Type != "" {
+ u = fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, opt.Type)
+ } else {
+ u = fmt.Sprintf("repos/%v/%v/git/refs", owner, repo)
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var rs []*Reference
+ resp, err := s.client.Do(req, &rs)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return rs, resp, err
+}
+
+// CreateRef creates a new ref in a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/git/refs/#create-a-reference
+func (s *GitService) CreateRef(owner string, repo string, ref *Reference) (*Reference, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/git/refs", owner, repo)
+ req, err := s.client.NewRequest("POST", u, &createRefRequest{
+ // back-compat with previous behavior that didn't require 'refs/' prefix
+ Ref: String("refs/" + strings.TrimPrefix(*ref.Ref, "refs/")),
+ SHA: ref.Object.SHA,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ r := new(Reference)
+ resp, err := s.client.Do(req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r, resp, err
+}
+
+// UpdateRef updates an existing ref in a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/git/refs/#update-a-reference
+func (s *GitService) UpdateRef(owner string, repo string, ref *Reference, force bool) (*Reference, *Response, error) {
+ refPath := strings.TrimPrefix(*ref.Ref, "refs/")
+ u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, refPath)
+ req, err := s.client.NewRequest("PATCH", u, &updateRefRequest{
+ SHA: ref.Object.SHA,
+ Force: &force,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ r := new(Reference)
+ resp, err := s.client.Do(req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r, resp, err
+}
+
+// DeleteRef deletes a ref from a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/git/refs/#delete-a-reference
+func (s *GitService) DeleteRef(owner string, repo string, ref string) (*Response, error) {
+ ref = strings.TrimPrefix(ref, "refs/")
+ u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, ref)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
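
As a rough sketch of how these helpers fit together (placeholder owner, repository, and SHA; an authenticated client is needed for the update), a branch ref can be read and then fast-forwarded:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // use an authenticated client for UpdateRef

	// "refs/heads/master" and "heads/master" are both accepted because
	// GetRef trims a leading "refs/".
	ref, _, err := client.Git.GetRef("example-owner", "example-repo", "refs/heads/master")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("master points at:", *ref.Object.SHA)

	// Fast-forward the branch to a new commit (hypothetical SHA), without force.
	ref.Object.SHA = github.String("7638417db6d59f3c431d3e1f261cc637155684cd")
	if _, _, err := client.Git.UpdateRef("example-owner", "example-repo", ref, false); err != nil {
		log.Fatal(err)
	}
}
```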
diff --git a/vendor/github.com/google/go-github/github/git_tags.go b/vendor/github.com/google/go-github/github/git_tags.go
new file mode 100644
index 0000000..01b9cb2
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/git_tags.go
@@ -0,0 +1,77 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+)
+
+// Tag represents a tag object.
+type Tag struct {
+ Tag *string `json:"tag,omitempty"`
+ SHA *string `json:"sha,omitempty"`
+ URL *string `json:"url,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Tagger *CommitAuthor `json:"tagger,omitempty"`
+ Object *GitObject `json:"object,omitempty"`
+ Verification *SignatureVerification `json:"verification,omitempty"`
+}
+
+// createTagRequest represents the body of a CreateTag request. This is mostly
+// identical to Tag with the exception that the object SHA and Type are
+// top-level fields, rather than being nested inside a JSON object.
+type createTagRequest struct {
+ Tag *string `json:"tag,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Object *string `json:"object,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Tagger *CommitAuthor `json:"tagger,omitempty"`
+}
+
+// GetTag fetches a tag from a repo given a SHA.
+//
+// GitHub API docs: http://developer.github.com/v3/git/tags/#get-a-tag
+func (s *GitService) GetTag(owner string, repo string, sha string) (*Tag, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/git/tags/%v", owner, repo, sha)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeGitSigningPreview)
+
+ tag := new(Tag)
+ resp, err := s.client.Do(req, tag)
+ return tag, resp, err
+}
+
+// CreateTag creates a tag object.
+//
+// GitHub API docs: http://developer.github.com/v3/git/tags/#create-a-tag-object
+func (s *GitService) CreateTag(owner string, repo string, tag *Tag) (*Tag, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/git/tags", owner, repo)
+
+ // convert Tag into a createTagRequest
+ tagRequest := &createTagRequest{
+ Tag: tag.Tag,
+ Message: tag.Message,
+ Tagger: tag.Tagger,
+ }
+ if tag.Object != nil {
+ tagRequest.Object = tag.Object.SHA
+ tagRequest.Type = tag.Object.Type
+ }
+
+ req, err := s.client.NewRequest("POST", u, tagRequest)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t := new(Tag)
+ resp, err := s.client.Do(req, t)
+ return t, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/git_trees.go b/vendor/github.com/google/go-github/github/git_trees.go
new file mode 100644
index 0000000..9efa4b3
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/git_trees.go
@@ -0,0 +1,89 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// Tree represents a GitHub tree.
+type Tree struct {
+ SHA *string `json:"sha,omitempty"`
+ Entries []TreeEntry `json:"tree,omitempty"`
+}
+
+func (t Tree) String() string {
+ return Stringify(t)
+}
+
+// TreeEntry represents the contents of a tree structure. TreeEntry can
+// represent either a blob, a commit (in the case of a submodule), or another
+// tree.
+type TreeEntry struct {
+ SHA *string `json:"sha,omitempty"`
+ Path *string `json:"path,omitempty"`
+ Mode *string `json:"mode,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Size *int `json:"size,omitempty"`
+ Content *string `json:"content,omitempty"`
+}
+
+func (t TreeEntry) String() string {
+ return Stringify(t)
+}
+
+// GetTree fetches the Tree object for a given sha hash from a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/git/trees/#get-a-tree
+func (s *GitService) GetTree(owner string, repo string, sha string, recursive bool) (*Tree, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/git/trees/%v", owner, repo, sha)
+ if recursive {
+ u += "?recursive=1"
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t := new(Tree)
+ resp, err := s.client.Do(req, t)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return t, resp, err
+}
+
+// createTree represents the body of a CreateTree request.
+type createTree struct {
+ BaseTree string `json:"base_tree,omitempty"`
+ Entries []TreeEntry `json:"tree"`
+}
+
+// CreateTree creates a new tree in a repository. If both a tree and a nested
+// path modifying that tree are specified, it will overwrite the contents of
+// that tree with the new path contents and write a new tree out.
+//
+// GitHub API docs: http://developer.github.com/v3/git/trees/#create-a-tree
+func (s *GitService) CreateTree(owner string, repo string, baseTree string, entries []TreeEntry) (*Tree, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/git/trees", owner, repo)
+
+ body := &createTree{
+ BaseTree: baseTree,
+ Entries: entries,
+ }
+ req, err := s.client.NewRequest("POST", u, body)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t := new(Tree)
+ resp, err := s.client.Do(req, t)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return t, resp, err
+}
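
A small, illustrative sketch of walking a tree recursively; the owner, repository, and tree SHA are placeholders (the SHA would typically come from Commit.Tree.SHA):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil)

	// With recursive=true the result includes entries from subdirectories as well.
	tree, _, err := client.Git.GetTree("example-owner", "example-repo",
		"3d21ec53a331a6f037a91c368710b99387d012c1", true)
	if err != nil {
		log.Fatal(err)
	}

	for _, entry := range tree.Entries {
		fmt.Printf("%s %s %s\n", *entry.Mode, *entry.Type, *entry.Path)
	}
}
```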
diff --git a/vendor/github.com/google/go-github/github/github.go b/vendor/github.com/google/go-github/github/github.go
new file mode 100644
index 0000000..0c8a677
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/github.go
@@ -0,0 +1,895 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/google/go-querystring/query"
+)
+
+const (
+ // StatusUnprocessableEntity is the status code returned when sending a request with invalid fields.
+ StatusUnprocessableEntity = 422
+)
+
+const (
+ libraryVersion = "2"
+ defaultBaseURL = "https://api.github.com/"
+ uploadBaseURL = "https://uploads.github.com/"
+ userAgent = "go-github/" + libraryVersion
+
+ headerRateLimit = "X-RateLimit-Limit"
+ headerRateRemaining = "X-RateLimit-Remaining"
+ headerRateReset = "X-RateLimit-Reset"
+ headerOTP = "X-GitHub-OTP"
+
+ mediaTypeV3 = "application/vnd.github.v3+json"
+ defaultMediaType = "application/octet-stream"
+ mediaTypeV3SHA = "application/vnd.github.v3.sha"
+ mediaTypeV3Diff = "application/vnd.github.v3.diff"
+ mediaTypeV3Patch = "application/vnd.github.v3.patch"
+ mediaTypeOrgPermissionRepo = "application/vnd.github.v3.repository+json"
+
+ // Media Type values to access preview APIs
+
+ // https://developer.github.com/changes/2015-03-09-licenses-api/
+ mediaTypeLicensesPreview = "application/vnd.github.drax-preview+json"
+
+ // https://developer.github.com/changes/2014-12-09-new-attributes-for-stars-api/
+ mediaTypeStarringPreview = "application/vnd.github.v3.star+json"
+
+ // https://developer.github.com/changes/2015-11-11-protected-branches-api/
+ mediaTypeProtectedBranchesPreview = "application/vnd.github.loki-preview+json"
+
+ // https://help.github.com/enterprise/2.4/admin/guides/migrations/exporting-the-github-com-organization-s-repositories/
+ mediaTypeMigrationsPreview = "application/vnd.github.wyandotte-preview+json"
+
+ // https://developer.github.com/changes/2016-04-06-deployment-and-deployment-status-enhancements/
+ mediaTypeDeploymentStatusPreview = "application/vnd.github.ant-man-preview+json"
+
+ // https://developer.github.com/changes/2016-02-19-source-import-preview-api/
+ mediaTypeImportPreview = "application/vnd.github.barred-rock-preview"
+
+ // https://developer.github.com/changes/2016-05-12-reactions-api-preview/
+ mediaTypeReactionsPreview = "application/vnd.github.squirrel-girl-preview"
+
+ // https://developer.github.com/changes/2016-04-01-squash-api-preview/
+ // https://developer.github.com/changes/2016-09-26-pull-request-merge-api-update/
+ mediaTypeSquashPreview = "application/vnd.github.polaris-preview+json"
+
+ // https://developer.github.com/changes/2016-04-04-git-signing-api-preview/
+ mediaTypeGitSigningPreview = "application/vnd.github.cryptographer-preview+json"
+
+ // https://developer.github.com/changes/2016-05-23-timeline-preview-api/
+ mediaTypeTimelinePreview = "application/vnd.github.mockingbird-preview+json"
+
+ // https://developer.github.com/changes/2016-06-14-repository-invitations/
+ mediaTypeRepositoryInvitationsPreview = "application/vnd.github.swamp-thing-preview+json"
+
+ // https://developer.github.com/changes/2016-07-06-github-pages-preiew-api/
+ mediaTypePagesPreview = "application/vnd.github.mister-fantastic-preview+json"
+
+ // https://developer.github.com/changes/2016-09-14-projects-api/
+ mediaTypeProjectsPreview = "application/vnd.github.inertia-preview+json"
+
+ // https://developer.github.com/changes/2016-09-14-Integrations-Early-Access/
+ mediaTypeIntegrationPreview = "application/vnd.github.machine-man-preview+json"
+
+ // https://developer.github.com/changes/2016-11-28-preview-org-membership/
+ mediaTypeOrgMembershipPreview = "application/vnd.github.korra-preview+json"
+)
+
+// A Client manages communication with the GitHub API.
+type Client struct {
+ clientMu sync.Mutex // clientMu protects the client during calls that modify the CheckRedirect func.
+ client *http.Client // HTTP client used to communicate with the API.
+
+ // Base URL for API requests. Defaults to the public GitHub API, but can be
+ // set to a domain endpoint to use with GitHub Enterprise. BaseURL should
+ // always be specified with a trailing slash.
+ BaseURL *url.URL
+
+ // Base URL for uploading files.
+ UploadURL *url.URL
+
+ // User agent used when communicating with the GitHub API.
+ UserAgent string
+
+ rateMu sync.Mutex
+ rateLimits [categories]Rate // Rate limits for the client as determined by the most recent API calls.
+ mostRecent rateLimitCategory
+
+ common service // Reuse a single struct instead of allocating one for each service on the heap.
+
+ // Services used for talking to different parts of the GitHub API.
+ Activity *ActivityService
+ Admin *AdminService
+ Authorizations *AuthorizationsService
+ Gists *GistsService
+ Git *GitService
+ Gitignores *GitignoresService
+ Integrations *IntegrationsService
+ Issues *IssuesService
+ Organizations *OrganizationsService
+ Projects *ProjectsService
+ PullRequests *PullRequestsService
+ Repositories *RepositoriesService
+ Search *SearchService
+ Users *UsersService
+ Licenses *LicensesService
+ Migrations *MigrationService
+ Reactions *ReactionsService
+}
+
+type service struct {
+ client *Client
+}
+
+// ListOptions specifies the optional parameters to various List methods that
+// support pagination.
+type ListOptions struct {
+ // For paginated result sets, page of results to retrieve.
+ Page int `url:"page,omitempty"`
+
+ // For paginated result sets, the number of results to include per page.
+ PerPage int `url:"per_page,omitempty"`
+}
+
+// UploadOptions specifies the parameters to methods that support uploads.
+type UploadOptions struct {
+ Name string `url:"name,omitempty"`
+}
+
+// RawType represents the type of raw format to request instead of JSON.
+type RawType uint8
+
+const (
+ // Diff format.
+ Diff RawType = 1 + iota
+ // Patch format.
+ Patch
+)
+
+// RawOptions specifies parameters for requesting the raw format of a
+// response instead of JSON.
+type RawOptions struct {
+ Type RawType
+}
+
+// addOptions adds the parameters in opt as URL query parameters to s. opt
+// must be a struct whose fields may contain "url" tags.
+func addOptions(s string, opt interface{}) (string, error) {
+ v := reflect.ValueOf(opt)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ return s, nil
+ }
+
+ u, err := url.Parse(s)
+ if err != nil {
+ return s, err
+ }
+
+ qs, err := query.Values(opt)
+ if err != nil {
+ return s, err
+ }
+
+ u.RawQuery = qs.Encode()
+ return u.String(), nil
+}
+
+// NewClient returns a new GitHub API client. If a nil httpClient is
+// provided, http.DefaultClient will be used. To use API methods which require
+// authentication, provide an http.Client that will perform the authentication
+// for you (such as that provided by the golang.org/x/oauth2 library).
+func NewClient(httpClient *http.Client) *Client {
+ if httpClient == nil {
+ httpClient = http.DefaultClient
+ }
+ baseURL, _ := url.Parse(defaultBaseURL)
+ uploadURL, _ := url.Parse(uploadBaseURL)
+
+ c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent, UploadURL: uploadURL}
+ c.common.client = c
+ c.Activity = (*ActivityService)(&c.common)
+ c.Admin = (*AdminService)(&c.common)
+ c.Authorizations = (*AuthorizationsService)(&c.common)
+ c.Gists = (*GistsService)(&c.common)
+ c.Git = (*GitService)(&c.common)
+ c.Gitignores = (*GitignoresService)(&c.common)
+ c.Integrations = (*IntegrationsService)(&c.common)
+ c.Issues = (*IssuesService)(&c.common)
+ c.Licenses = (*LicensesService)(&c.common)
+ c.Migrations = (*MigrationService)(&c.common)
+ c.Organizations = (*OrganizationsService)(&c.common)
+ c.Projects = (*ProjectsService)(&c.common)
+ c.PullRequests = (*PullRequestsService)(&c.common)
+ c.Reactions = (*ReactionsService)(&c.common)
+ c.Repositories = (*RepositoriesService)(&c.common)
+ c.Search = (*SearchService)(&c.common)
+ c.Users = (*UsersService)(&c.common)
+ return c
+}
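
As the comment above notes, authentication is delegated to the supplied http.Client; a typical sketch uses golang.org/x/oauth2 with a placeholder token (Issues.List is defined in issues.go of this package):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
	"golang.org/x/oauth2"
)

func main() {
	// The token is a placeholder; read a real token from the environment or a secret store.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "YOUR_PERSONAL_ACCESS_TOKEN"})
	tc := oauth2.NewClient(context.Background(), ts)

	// Every request made through tc carries the Authorization header,
	// so all service methods on this client are authenticated.
	client := github.NewClient(tc)

	// List issues assigned to the authenticated user across visible repositories.
	issues, _, err := client.Issues.List(true, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("issues assigned to me:", len(issues))
}
```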
+
+// NewRequest creates an API request. A relative URL can be provided in urlStr,
+// in which case it is resolved relative to the BaseURL of the Client.
+// Relative URLs should always be specified without a preceding slash. If
+// specified, the value pointed to by body is JSON encoded and included as the
+// request body.
+func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) {
+ rel, err := url.Parse(urlStr)
+ if err != nil {
+ return nil, err
+ }
+
+ u := c.BaseURL.ResolveReference(rel)
+
+ var buf io.ReadWriter
+ if body != nil {
+ buf = new(bytes.Buffer)
+ err := json.NewEncoder(buf).Encode(body)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ req, err := http.NewRequest(method, u.String(), buf)
+ if err != nil {
+ return nil, err
+ }
+
+ if body != nil {
+ req.Header.Set("Content-Type", "application/json")
+ }
+ req.Header.Set("Accept", mediaTypeV3)
+ if c.UserAgent != "" {
+ req.Header.Set("User-Agent", c.UserAgent)
+ }
+ return req, nil
+}
+
+// NewUploadRequest creates an upload request. A relative URL can be provided in
+// urlStr, in which case it is resolved relative to the UploadURL of the Client.
+// Relative URLs should always be specified without a preceding slash.
+func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, mediaType string) (*http.Request, error) {
+ rel, err := url.Parse(urlStr)
+ if err != nil {
+ return nil, err
+ }
+
+ u := c.UploadURL.ResolveReference(rel)
+ req, err := http.NewRequest("POST", u.String(), reader)
+ if err != nil {
+ return nil, err
+ }
+ req.ContentLength = size
+
+ if mediaType == "" {
+ mediaType = defaultMediaType
+ }
+ req.Header.Set("Content-Type", mediaType)
+ req.Header.Set("Accept", mediaTypeV3)
+ req.Header.Set("User-Agent", c.UserAgent)
+ return req, nil
+}
+
+// Response is a GitHub API response. This wraps the standard http.Response
+// returned from GitHub and provides convenient access to things like
+// pagination links.
+type Response struct {
+ *http.Response
+
+ // These fields provide the page values for paginating through a set of
+ // results. Any or all of these may be set to the zero value for
+ // responses that are not part of a paginated set, or for which there
+ // are no additional pages.
+
+ NextPage int
+ PrevPage int
+ FirstPage int
+ LastPage int
+
+ Rate
+}
+
+// newResponse creates a new Response for the provided http.Response.
+func newResponse(r *http.Response) *Response {
+ response := &Response{Response: r}
+ response.populatePageValues()
+ response.Rate = parseRate(r)
+ return response
+}
+
+// populatePageValues parses the HTTP Link response headers and populates the
+// various pagination link values in the Response.
+func (r *Response) populatePageValues() {
+ if links, ok := r.Response.Header["Link"]; ok && len(links) > 0 {
+ for _, link := range strings.Split(links[0], ",") {
+ segments := strings.Split(strings.TrimSpace(link), ";")
+
+ // link must at least have href and rel
+ if len(segments) < 2 {
+ continue
+ }
+
+ // ensure href is properly formatted
+ if !strings.HasPrefix(segments[0], "<") || !strings.HasSuffix(segments[0], ">") {
+ continue
+ }
+
+ // try to pull out page parameter
+ url, err := url.Parse(segments[0][1 : len(segments[0])-1])
+ if err != nil {
+ continue
+ }
+ page := url.Query().Get("page")
+ if page == "" {
+ continue
+ }
+
+ for _, segment := range segments[1:] {
+ switch strings.TrimSpace(segment) {
+ case `rel="next"`:
+ r.NextPage, _ = strconv.Atoi(page)
+ case `rel="prev"`:
+ r.PrevPage, _ = strconv.Atoi(page)
+ case `rel="first"`:
+ r.FirstPage, _ = strconv.Atoi(page)
+ case `rel="last"`:
+ r.LastPage, _ = strconv.Atoi(page)
+ }
+
+ }
+ }
+ }
+}
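
Putting the pagination fields to use, a common pattern is to loop until NextPage is zero; this sketch assumes placeholder owner and repository names and uses Issues.ListByRepo from issues.go in this package:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil)

	opt := &github.IssueListByRepoOptions{
		State:       "open",
		ListOptions: github.ListOptions{PerPage: 50},
	}

	var all []*github.Issue
	for {
		issues, resp, err := client.Issues.ListByRepo("example-owner", "example-repo", opt)
		if err != nil {
			log.Fatal(err)
		}
		all = append(all, issues...)

		// NextPage is zero when the Link header reports no further pages.
		if resp.NextPage == 0 {
			break
		}
		opt.Page = resp.NextPage
	}
	fmt.Println("open issues:", len(all))
}
```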
+
+// parseRate parses the rate related headers.
+func parseRate(r *http.Response) Rate {
+ var rate Rate
+ if limit := r.Header.Get(headerRateLimit); limit != "" {
+ rate.Limit, _ = strconv.Atoi(limit)
+ }
+ if remaining := r.Header.Get(headerRateRemaining); remaining != "" {
+ rate.Remaining, _ = strconv.Atoi(remaining)
+ }
+ if reset := r.Header.Get(headerRateReset); reset != "" {
+ if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 {
+ rate.Reset = Timestamp{time.Unix(v, 0)}
+ }
+ }
+ return rate
+}
+
+// Rate specifies the current rate limit for the client as determined by the
+// most recent API call. If the client is used in a multi-user application,
+// this rate may not always be up-to-date.
+//
+// Deprecated: Use the Response.Rate returned from most recent API call instead.
+// Call RateLimits() to check the current rate.
+func (c *Client) Rate() Rate {
+ c.rateMu.Lock()
+ rate := c.rateLimits[c.mostRecent]
+ c.rateMu.Unlock()
+ return rate
+}
+
+// Do sends an API request and returns the API response. The API response is
+// JSON decoded and stored in the value pointed to by v, or returned as an
+// error if an API error has occurred. If v implements the io.Writer
+// interface, the raw response body will be written to v, without attempting to
+// first decode it. If the rate limit is exceeded and the reset time is in the
+// future, Do returns a *RateLimitError immediately without making a network API call.
+func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
+ rateLimitCategory := category(req.URL.Path)
+
+ // If we've hit rate limit, don't make further requests before Reset time.
+ if err := c.checkRateLimitBeforeDo(req, rateLimitCategory); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.client.Do(req)
+ if err != nil {
+ if e, ok := err.(*url.Error); ok {
+ if url, err := url.Parse(e.URL); err == nil {
+ e.URL = sanitizeURL(url).String()
+ return nil, e
+ }
+ }
+ return nil, err
+ }
+
+ defer func() {
+ // Drain up to 512 bytes and close the body to let the Transport reuse the connection
+ io.CopyN(ioutil.Discard, resp.Body, 512)
+ resp.Body.Close()
+ }()
+
+ response := newResponse(resp)
+
+ c.rateMu.Lock()
+ c.rateLimits[rateLimitCategory] = response.Rate
+ c.mostRecent = rateLimitCategory
+ c.rateMu.Unlock()
+
+ err = CheckResponse(resp)
+ if err != nil {
+ // even though there was an error, we still return the response
+ // in case the caller wants to inspect it further
+ return response, err
+ }
+
+ if v != nil {
+ if w, ok := v.(io.Writer); ok {
+ io.Copy(w, resp.Body)
+ } else {
+ err = json.NewDecoder(resp.Body).Decode(v)
+ if err == io.EOF {
+ err = nil // ignore EOF errors caused by empty response body
+ }
+ }
+ }
+
+ return response, err
+}
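
Callers can distinguish the error types produced by Do and CheckResponse with a type switch; a sketch using the unauthenticated client and Issues.Get from issues.go in this package, with placeholder owner, repository, and issue number:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil)

	issue, _, err := client.Issues.Get("example-owner", "example-repo", 1)
	switch e := err.(type) {
	case nil:
		fmt.Println("issue title:", *issue.Title)
	case *github.RateLimitError:
		// Do also returns this error without a network call once it knows the
		// limit is exhausted until e.Rate.Reset.Time.
		log.Printf("rate limited until %v", e.Rate.Reset.Time)
	case *github.AbuseRateLimitError:
		if e.RetryAfter != nil {
			log.Printf("abuse detection triggered, retry after %v", *e.RetryAfter)
		} else {
			log.Print("abuse detection triggered, retry later")
		}
	default:
		log.Fatal(err)
	}
}
```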
+
+// checkRateLimitBeforeDo does not make any network calls, but uses existing knowledge from
+// current client state in order to quickly check if *RateLimitError can be immediately returned
+// from Client.Do, and if so, returns it so that Client.Do can skip making a network API call unnecessarily.
+// Otherwise it returns nil, and Client.Do should proceed normally.
+func (c *Client) checkRateLimitBeforeDo(req *http.Request, rateLimitCategory rateLimitCategory) error {
+ c.rateMu.Lock()
+ rate := c.rateLimits[rateLimitCategory]
+ c.rateMu.Unlock()
+ if !rate.Reset.Time.IsZero() && rate.Remaining == 0 && time.Now().Before(rate.Reset.Time) {
+ // Create a fake response.
+ resp := &http.Response{
+ Status: http.StatusText(http.StatusForbidden),
+ StatusCode: http.StatusForbidden,
+ Request: req,
+ Header: make(http.Header),
+ Body: ioutil.NopCloser(strings.NewReader("")),
+ }
+ return &RateLimitError{
+ Rate: rate,
+ Response: resp,
+ Message: fmt.Sprintf("API rate limit of %v still exceeded until %v, not making remote request.", rate.Limit, rate.Reset.Time),
+ }
+ }
+
+ return nil
+}
+
+/*
+An ErrorResponse reports one or more errors caused by an API request.
+
+GitHub API docs: http://developer.github.com/v3/#client-errors
+*/
+type ErrorResponse struct {
+ Response *http.Response // HTTP response that caused this error
+ Message string `json:"message"` // error message
+ Errors []Error `json:"errors"` // more detail on individual errors
+ // Block is only populated on certain types of errors such as code 451.
+ // See https://developer.github.com/changes/2016-03-17-the-451-status-code-is-now-supported/
+ // for more information.
+ Block *struct {
+ Reason string `json:"reason,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ } `json:"block,omitempty"`
+ // Most errors will also include a documentation_url field pointing
+ // to some content that might help you resolve the error, see
+ // https://developer.github.com/v3/#client-errors
+ DocumentationURL string `json:"documentation_url,omitempty"`
+}
+
+func (r *ErrorResponse) Error() string {
+ return fmt.Sprintf("%v %v: %d %v %+v",
+ r.Response.Request.Method, sanitizeURL(r.Response.Request.URL),
+ r.Response.StatusCode, r.Message, r.Errors)
+}
+
+// TwoFactorAuthError occurs when using HTTP Basic Authentication for a user
+// that has two-factor authentication enabled. The request can be reattempted
+// by providing a one-time password in the request.
+type TwoFactorAuthError ErrorResponse
+
+func (r *TwoFactorAuthError) Error() string { return (*ErrorResponse)(r).Error() }
+
+// RateLimitError occurs when GitHub returns a 403 Forbidden response with a rate limit
+// remaining value of 0 and an error message that starts with "API rate limit exceeded for ".
+type RateLimitError struct {
+ Rate Rate // Rate specifies last known rate limit for the client
+ Response *http.Response // HTTP response that caused this error
+ Message string `json:"message"` // error message
+}
+
+func (r *RateLimitError) Error() string {
+ return fmt.Sprintf("%v %v: %d %v; rate reset in %v",
+ r.Response.Request.Method, sanitizeURL(r.Response.Request.URL),
+ r.Response.StatusCode, r.Message, r.Rate.Reset.Time.Sub(time.Now()))
+}
+
+// AcceptedError occurs when GitHub returns 202 Accepted response with an
+// empty body, which means a job was scheduled on the GitHub side to process
+// the information needed and cache it.
+// Technically, 202 Accepted is not a real error; it is just used to
+// indicate that results are not ready yet but should be available soon.
+// The request can be repeated after some time.
+type AcceptedError struct{}
+
+func (*AcceptedError) Error() string {
+ return "job scheduled on GitHub side; try again later"
+}
+
+// AbuseRateLimitError occurs when GitHub returns a 403 Forbidden response with the
+// "documentation_url" field value equal to "https://developer.github.com/v3#abuse-rate-limits".
+type AbuseRateLimitError struct {
+ Response *http.Response // HTTP response that caused this error
+ Message string `json:"message"` // error message
+
+ // RetryAfter is provided with some abuse rate limit errors. If present,
+ // it is the amount of time that the client should wait before retrying.
+ // Otherwise, the client should try again later (after an unspecified amount of time).
+ RetryAfter *time.Duration
+}
+
+func (r *AbuseRateLimitError) Error() string {
+ return fmt.Sprintf("%v %v: %d %v",
+ r.Response.Request.Method, sanitizeURL(r.Response.Request.URL),
+ r.Response.StatusCode, r.Message)
+}
+
+// sanitizeURL redacts the client_secret parameter from the URL which may be
+// exposed to the user.
+func sanitizeURL(uri *url.URL) *url.URL {
+ if uri == nil {
+ return nil
+ }
+ params := uri.Query()
+ if len(params.Get("client_secret")) > 0 {
+ params.Set("client_secret", "REDACTED")
+ uri.RawQuery = params.Encode()
+ }
+ return uri
+}
+
+/*
+An Error reports more details on an individual error in an ErrorResponse.
+These are the possible validation error codes:
+
+ missing:
+ resource does not exist
+ missing_field:
+ a required field on a resource has not been set
+ invalid:
+ the formatting of a field is invalid
+ already_exists:
+ another resource has the same value as this field
+ custom:
+ some resources return this (e.g. github.User.CreateKey()), additional
+ information is set in the Message field of the Error
+
+GitHub API docs: http://developer.github.com/v3/#client-errors
+*/
+type Error struct {
+ Resource string `json:"resource"` // resource on which the error occurred
+ Field string `json:"field"` // field on which the error occurred
+ Code string `json:"code"` // validation error code
+ Message string `json:"message"` // Message describing the error. Errors with Code == "custom" will always have this set.
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("%v error caused by %v field on %v resource",
+ e.Code, e.Field, e.Resource)
+}
+
+// CheckResponse checks the API response for errors, and returns them if
+// present. A response is considered an error if it has a status code outside
+// the 200 range or equal to 202 Accepted.
+// API error responses are expected to have either no response
+// body, or a JSON response body that maps to ErrorResponse. Any other
+// response body will be silently ignored.
+//
+// The error type will be *RateLimitError for rate limit exceeded errors,
+// *AcceptedError for 202 Accepted status codes,
+// and *TwoFactorAuthError for two-factor authentication errors.
+func CheckResponse(r *http.Response) error {
+ if r.StatusCode == http.StatusAccepted {
+ return &AcceptedError{}
+ }
+ if c := r.StatusCode; 200 <= c && c <= 299 {
+ return nil
+ }
+ errorResponse := &ErrorResponse{Response: r}
+ data, err := ioutil.ReadAll(r.Body)
+ if err == nil && data != nil {
+ json.Unmarshal(data, errorResponse)
+ }
+ switch {
+ case r.StatusCode == http.StatusUnauthorized && strings.HasPrefix(r.Header.Get(headerOTP), "required"):
+ return (*TwoFactorAuthError)(errorResponse)
+ case r.StatusCode == http.StatusForbidden && r.Header.Get(headerRateRemaining) == "0" && strings.HasPrefix(errorResponse.Message, "API rate limit exceeded for "):
+ return &RateLimitError{
+ Rate: parseRate(r),
+ Response: errorResponse.Response,
+ Message: errorResponse.Message,
+ }
+ case r.StatusCode == http.StatusForbidden && errorResponse.DocumentationURL == "https://developer.github.com/v3#abuse-rate-limits":
+ abuseRateLimitError := &AbuseRateLimitError{
+ Response: errorResponse.Response,
+ Message: errorResponse.Message,
+ }
+ if v := r.Header["Retry-After"]; len(v) > 0 {
+ // According to GitHub support, the "Retry-After" header value will be
+ // an integer which represents the number of seconds that one should
+ // wait before resuming making requests.
+ retryAfterSeconds, _ := strconv.ParseInt(v[0], 10, 64) // Error handling is noop.
+ retryAfter := time.Duration(retryAfterSeconds) * time.Second
+ abuseRateLimitError.RetryAfter = &retryAfter
+ }
+ return abuseRateLimitError
+ default:
+ return errorResponse
+ }
+}
+
+// parseBoolResponse determines the boolean result from a GitHub API response.
+// Several GitHub API methods return boolean responses indicated by the HTTP
+// status code in the response (true indicated by a 204, false indicated by a
+// 404). This helper function will determine that result and hide the 404
+// error if present. Any other error will be returned through as-is.
+func parseBoolResponse(err error) (bool, error) {
+ if err == nil {
+ return true, nil
+ }
+
+ if err, ok := err.(*ErrorResponse); ok && err.Response.StatusCode == http.StatusNotFound {
+ // Simply false. In this one case, we do not pass the error through.
+ return false, nil
+ }
+
+ // some other real error occurred
+ return false, err
+}
+
+// Rate represents the rate limit for the current client.
+type Rate struct {
+ // The number of requests per hour the client is currently limited to.
+ Limit int `json:"limit"`
+
+ // The number of remaining requests the client can make this hour.
+ Remaining int `json:"remaining"`
+
+ // The time at which the current rate limit will reset.
+ Reset Timestamp `json:"reset"`
+}
+
+func (r Rate) String() string {
+ return Stringify(r)
+}
+
+// RateLimits represents the rate limits for the current client.
+type RateLimits struct {
+ // The rate limit for non-search API requests. Unauthenticated
+ // requests are limited to 60 per hour. Authenticated requests are
+ // limited to 5,000 per hour.
+ //
+ // GitHub API docs: https://developer.github.com/v3/#rate-limiting
+ Core *Rate `json:"core"`
+
+ // The rate limit for search API requests. Unauthenticated requests
+ // are limited to 10 requests per minute. Authenticated requests are
+ // limited to 30 per minute.
+ //
+ // GitHub API docs: https://developer.github.com/v3/search/#rate-limit
+ Search *Rate `json:"search"`
+}
+
+func (r RateLimits) String() string {
+ return Stringify(r)
+}
+
+type rateLimitCategory uint8
+
+const (
+ coreCategory rateLimitCategory = iota
+ searchCategory
+
+ categories // An array of this length will be able to contain all rate limit categories.
+)
+
+// category returns the rate limit category of the endpoint, determined by Request.URL.Path.
+func category(path string) rateLimitCategory {
+ switch {
+ default:
+ return coreCategory
+ case strings.HasPrefix(path, "/search/"):
+ return searchCategory
+ }
+}
+
+// RateLimit returns the core rate limit for the current client.
+//
+// Deprecated: RateLimit is deprecated, use RateLimits instead.
+func (c *Client) RateLimit() (*Rate, *Response, error) {
+ limits, resp, err := c.RateLimits()
+ if limits == nil {
+ return nil, nil, err
+ }
+
+ return limits.Core, resp, err
+}
+
+// RateLimits returns the rate limits for the current client.
+func (c *Client) RateLimits() (*RateLimits, *Response, error) {
+ req, err := c.NewRequest("GET", "rate_limit", nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ response := new(struct {
+ Resources *RateLimits `json:"resources"`
+ })
+ resp, err := c.Do(req, response)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if response.Resources != nil {
+ c.rateMu.Lock()
+ if response.Resources.Core != nil {
+ c.rateLimits[coreCategory] = *response.Resources.Core
+ }
+ if response.Resources.Search != nil {
+ c.rateLimits[searchCategory] = *response.Resources.Search
+ }
+ c.rateMu.Unlock()
+ }
+
+ return response.Resources, resp, err
+}
+
+/*
+UnauthenticatedRateLimitedTransport allows you to make unauthenticated calls
+that need to use a higher rate limit associated with your OAuth application.
+
+ t := &github.UnauthenticatedRateLimitedTransport{
+ ClientID: "your app's client ID",
+ ClientSecret: "your app's client secret",
+ }
+ client := github.NewClient(t.Client())
+
+This will append the querystring params client_id=xxx&client_secret=yyy to all
+requests.
+
+See http://developer.github.com/v3/#unauthenticated-rate-limited-requests for
+more information.
+*/
+type UnauthenticatedRateLimitedTransport struct {
+ // ClientID is the GitHub OAuth client ID of the current application, which
+ // can be found by selecting its entry in the list at
+ // https://github.com/settings/applications.
+ ClientID string
+
+ // ClientSecret is the GitHub OAuth client secret of the current
+ // application.
+ ClientSecret string
+
+ // Transport is the underlying HTTP transport to use when making requests.
+ // It will default to http.DefaultTransport if nil.
+ Transport http.RoundTripper
+}
+
+// RoundTrip implements the RoundTripper interface.
+func (t *UnauthenticatedRateLimitedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ if t.ClientID == "" {
+ return nil, errors.New("t.ClientID is empty")
+ }
+ if t.ClientSecret == "" {
+ return nil, errors.New("t.ClientSecret is empty")
+ }
+
+ // To set extra querystring params, we must make a copy of the Request so
+ // that we don't modify the Request we were given. This is required by the
+ // specification of http.RoundTripper.
+ req = cloneRequest(req)
+ q := req.URL.Query()
+ q.Set("client_id", t.ClientID)
+ q.Set("client_secret", t.ClientSecret)
+ req.URL.RawQuery = q.Encode()
+
+ // Make the HTTP request.
+ return t.transport().RoundTrip(req)
+}
+
+// Client returns an *http.Client that makes requests which are subject to the
+// rate limit of your OAuth application.
+func (t *UnauthenticatedRateLimitedTransport) Client() *http.Client {
+ return &http.Client{Transport: t}
+}
+
+func (t *UnauthenticatedRateLimitedTransport) transport() http.RoundTripper {
+ if t.Transport != nil {
+ return t.Transport
+ }
+ return http.DefaultTransport
+}
+
+// BasicAuthTransport is an http.RoundTripper that authenticates all requests
+// using HTTP Basic Authentication with the provided username and password. It
+// additionally supports users who have two-factor authentication enabled on
+// their GitHub account.
+type BasicAuthTransport struct {
+ Username string // GitHub username
+ Password string // GitHub password
+ OTP string // one-time password for users with two-factor auth enabled
+
+ // Transport is the underlying HTTP transport to use when making requests.
+ // It will default to http.DefaultTransport if nil.
+ Transport http.RoundTripper
+}
+
+// RoundTrip implements the RoundTripper interface.
+func (t *BasicAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ req = cloneRequest(req) // per RoundTrip contract
+ req.SetBasicAuth(t.Username, t.Password)
+ if t.OTP != "" {
+ req.Header.Set(headerOTP, t.OTP)
+ }
+ return t.transport().RoundTrip(req)
+}
+
+// Client returns an *http.Client that makes requests that are authenticated
+// using HTTP Basic Authentication.
+func (t *BasicAuthTransport) Client() *http.Client {
+ return &http.Client{Transport: t}
+}
+
+func (t *BasicAuthTransport) transport() http.RoundTripper {
+ if t.Transport != nil {
+ return t.Transport
+ }
+ return http.DefaultTransport
+}
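
A short usage sketch of BasicAuthTransport with placeholder credentials; the OTP field is only required for accounts with two-factor authentication enabled, and Users.Get is defined in users.go of this package:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	// Placeholder credentials; never hard-code real ones.
	tp := github.BasicAuthTransport{
		Username: "octocat",
		Password: "password-or-personal-access-token",
		OTP:      "123456", // omit unless two-factor auth is enabled
	}

	client := github.NewClient(tp.Client())

	// Passing an empty string fetches the authenticated user.
	user, _, err := client.Users.Get("")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("authenticated as:", *user.Login)
}
```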
+
+// cloneRequest returns a clone of the provided *http.Request. The clone is a
+// shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+ return r2
+}
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int is a helper routine that allocates a new int value
+// to store v and returns a pointer to it.
+func Int(v int) *int { return &v }
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string { return &v }
diff --git a/vendor/github.com/google/go-github/github/gitignore.go b/vendor/github.com/google/go-github/github/gitignore.go
new file mode 100644
index 0000000..faaceb5
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/gitignore.go
@@ -0,0 +1,61 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// GitignoresService provides access to the gitignore related functions in the
+// GitHub API.
+//
+// GitHub API docs: http://developer.github.com/v3/gitignore/
+type GitignoresService service
+
+// Gitignore represents a .gitignore file as returned by the GitHub API.
+type Gitignore struct {
+ Name *string `json:"name,omitempty"`
+ Source *string `json:"source,omitempty"`
+}
+
+func (g Gitignore) String() string {
+ return Stringify(g)
+}
+
+// List all available Gitignore templates.
+//
+// http://developer.github.com/v3/gitignore/#listing-available-templates
+func (s GitignoresService) List() ([]string, *Response, error) {
+ req, err := s.client.NewRequest("GET", "gitignore/templates", nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ availableTemplates := new([]string)
+ resp, err := s.client.Do(req, availableTemplates)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *availableTemplates, resp, err
+}
+
+// Get a Gitignore by name.
+//
+// http://developer.github.com/v3/gitignore/#get-a-single-template
+func (s GitignoresService) Get(name string) (*Gitignore, *Response, error) {
+ u := fmt.Sprintf("gitignore/templates/%v", name)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ gitignore := new(Gitignore)
+ resp, err := s.client.Do(req, gitignore)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return gitignore, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/integration.go b/vendor/github.com/google/go-github/github/integration.go
new file mode 100644
index 0000000..b8d77ca
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/integration.go
@@ -0,0 +1,38 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+// IntegrationsService provides access to the installation related functions
+// in the GitHub API.
+//
+// GitHub API docs: https://developer.github.com/v3/integrations/
+type IntegrationsService service
+
+// ListInstallations lists the installations that the current integration has.
+//
+// GitHub API docs: https://developer.github.com/v3/integrations/#find-installations
+func (s *IntegrationsService) ListInstallations(opt *ListOptions) ([]*Installation, *Response, error) {
+ u, err := addOptions("integration/installations", opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeIntegrationPreview)
+
+ i := new([]*Installation)
+ resp, err := s.client.Do(req, &i)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *i, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/integration_installation.go b/vendor/github.com/google/go-github/github/integration_installation.go
new file mode 100644
index 0000000..aa59bfe
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/integration_installation.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+// Installation represents a GitHub integration installation.
+type Installation struct {
+ ID *int `json:"id,omitempty"`
+ Account *User `json:"account,omitempty"`
+ AccessTokensURL *string `json:"access_tokens_url,omitempty"`
+ RepositoriesURL *string `json:"repositories_url,omitempty"`
+}
+
+func (i Installation) String() string {
+ return Stringify(i)
+}
+
+// ListRepos lists the repositories that the current installation has access to.
+//
+// GitHub API docs: https://developer.github.com/v3/integrations/installations/#list-repositories
+func (s *IntegrationsService) ListRepos(opt *ListOptions) ([]*Repository, *Response, error) {
+ u, err := addOptions("installation/repositories", opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeIntegrationPreview)
+
+ var r struct {
+ Repositories []*Repository `json:"repositories"`
+ }
+ resp, err := s.client.Do(req, &r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r.Repositories, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/issues.go b/vendor/github.com/google/go-github/github/issues.go
new file mode 100644
index 0000000..d8e7d41
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/issues.go
@@ -0,0 +1,304 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// IssuesService handles communication with the issue related
+// methods of the GitHub API.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/
+type IssuesService service
+
+// Issue represents a GitHub issue on a repository.
+//
+// Note: As far as the GitHub API is concerned, every pull request is an issue,
+// but not every issue is a pull request. Some endpoints, events, and webhooks
+// may also return pull requests via this struct. If PullRequestLinks is nil,
+// this is an issue, and if PullRequestLinks is not nil, this is a pull request.
+type Issue struct {
+ ID *int `json:"id,omitempty"`
+ Number *int `json:"number,omitempty"`
+ State *string `json:"state,omitempty"`
+ Title *string `json:"title,omitempty"`
+ Body *string `json:"body,omitempty"`
+ User *User `json:"user,omitempty"`
+ Labels []Label `json:"labels,omitempty"`
+ Assignee *User `json:"assignee,omitempty"`
+ Comments *int `json:"comments,omitempty"`
+ ClosedAt *time.Time `json:"closed_at,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ Milestone *Milestone `json:"milestone,omitempty"`
+ PullRequestLinks *PullRequestLinks `json:"pull_request,omitempty"`
+ Repository *Repository `json:"repository,omitempty"`
+ Reactions *Reactions `json:"reactions,omitempty"`
+ Assignees []*User `json:"assignees,omitempty"`
+
+ // TextMatches is only populated from search results that request text matches
+ // See: search.go and https://developer.github.com/v3/search/#text-match-metadata
+ TextMatches []TextMatch `json:"text_matches,omitempty"`
+}
+
+func (i Issue) String() string {
+ return Stringify(i)
+}
+
+// IssueRequest represents a request to create/edit an issue.
+// It is separate from Issue above because otherwise Labels
+// and Assignee fail to serialize to the correct JSON.
+type IssueRequest struct {
+ Title *string `json:"title,omitempty"`
+ Body *string `json:"body,omitempty"`
+ Labels *[]string `json:"labels,omitempty"`
+ Assignee *string `json:"assignee,omitempty"`
+ State *string `json:"state,omitempty"`
+ Milestone *int `json:"milestone,omitempty"`
+ Assignees *[]string `json:"assignees,omitempty"`
+}
+
+// IssueListOptions specifies the optional parameters to the IssuesService.List
+// and IssuesService.ListByOrg methods.
+type IssueListOptions struct {
+ // Filter specifies which issues to list. Possible values are: assigned,
+ // created, mentioned, subscribed, all. Default is "assigned".
+ Filter string `url:"filter,omitempty"`
+
+ // State filters issues based on their state. Possible values are: open,
+ // closed, all. Default is "open".
+ State string `url:"state,omitempty"`
+
+ // Labels filters issues based on their label.
+ Labels []string `url:"labels,comma,omitempty"`
+
+ // Sort specifies how to sort issues. Possible values are: created, updated,
+ // and comments. Default value is "created".
+ Sort string `url:"sort,omitempty"`
+
+ // Direction in which to sort issues. Possible values are: asc, desc.
+ // Default is "desc".
+ Direction string `url:"direction,omitempty"`
+
+ // Since filters issues by time.
+ Since time.Time `url:"since,omitempty"`
+
+ ListOptions
+}
+
+// PullRequestLinks is added to the Issue object when the issue is included in an
+// IssueCommentEvent webhook payload fired by a comment on a pull request.
+type PullRequestLinks struct {
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ DiffURL *string `json:"diff_url,omitempty"`
+ PatchURL *string `json:"patch_url,omitempty"`
+}
+
+// List the issues for the authenticated user. If all is true, list issues
+// across all the user's visible repositories including owned, member, and
+// organization repositories; if false, list only owned and member
+// repositories.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/#list-issues
+func (s *IssuesService) List(all bool, opt *IssueListOptions) ([]*Issue, *Response, error) {
+ var u string
+ if all {
+ u = "issues"
+ } else {
+ u = "user/issues"
+ }
+ return s.listIssues(u, opt)
+}
+
+// ListByOrg fetches the issues in the specified organization for the
+// authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/#list-issues
+func (s *IssuesService) ListByOrg(org string, opt *IssueListOptions) ([]*Issue, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/issues", org)
+ return s.listIssues(u, opt)
+}
+
+func (s *IssuesService) listIssues(u string, opt *IssueListOptions) ([]*Issue, *Response, error) {
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ issues := new([]*Issue)
+ resp, err := s.client.Do(req, issues)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *issues, resp, err
+}
+
+// IssueListByRepoOptions specifies the optional parameters to the
+// IssuesService.ListByRepo method.
+type IssueListByRepoOptions struct {
+ // Milestone limits issues for the specified milestone. Possible values are
+ // a milestone number, "none" for issues with no milestone, "*" for issues
+ // with any milestone.
+ Milestone string `url:"milestone,omitempty"`
+
+ // State filters issues based on their state. Possible values are: open,
+ // closed, all. Default is "open".
+ State string `url:"state,omitempty"`
+
+ // Assignee filters issues based on their assignee. Possible values are a
+ // user name, "none" for issues that are not assigned, "*" for issues with
+ // any assigned user.
+ Assignee string `url:"assignee,omitempty"`
+
+ // Creator filters issues based on their creator.
+ Creator string `url:"creator,omitempty"`
+
+ // Mentioned filters issues to those that mention a specific user.
+ Mentioned string `url:"mentioned,omitempty"`
+
+ // Labels filters issues based on their label.
+ Labels []string `url:"labels,omitempty,comma"`
+
+ // Sort specifies how to sort issues. Possible values are: created, updated,
+ // and comments. Default value is "created".
+ Sort string `url:"sort,omitempty"`
+
+ // Direction in which to sort issues. Possible values are: asc, desc.
+ // Default is "desc".
+ Direction string `url:"direction,omitempty"`
+
+ // Since filters issues by time.
+ Since time.Time `url:"since,omitempty"`
+
+ ListOptions
+}
+
+// ListByRepo lists the issues for the specified repository.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/#list-issues-for-a-repository
+func (s *IssuesService) ListByRepo(owner string, repo string, opt *IssueListByRepoOptions) ([]*Issue, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ issues := new([]*Issue)
+ resp, err := s.client.Do(req, issues)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *issues, resp, err
+}
+
+// Get a single issue.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/#get-a-single-issue
+func (s *IssuesService) Get(owner string, repo string, number int) (*Issue, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ issue := new(Issue)
+ resp, err := s.client.Do(req, issue)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return issue, resp, err
+}
+
+// Create a new issue on the specified repository.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/#create-an-issue
+func (s *IssuesService) Create(owner string, repo string, issue *IssueRequest) (*Issue, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues", owner, repo)
+ req, err := s.client.NewRequest("POST", u, issue)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ i := new(Issue)
+ resp, err := s.client.Do(req, i)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return i, resp, err
+}
+
+// Edit an issue.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/#edit-an-issue
+func (s *IssuesService) Edit(owner string, repo string, number int, issue *IssueRequest) (*Issue, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number)
+ req, err := s.client.NewRequest("PATCH", u, issue)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ i := new(Issue)
+ resp, err := s.client.Do(req, i)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return i, resp, err
+}
+
+// Lock an issue's conversation.
+//
+// GitHub API docs: https://developer.github.com/v3/issues/#lock-an-issue
+func (s *IssuesService) Lock(owner string, repo string, number int) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%d/lock", owner, repo, number)
+ req, err := s.client.NewRequest("PUT", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// Unlock an issue's conversation.
+//
+// GitHub API docs: https://developer.github.com/v3/issues/#unlock-an-issue
+func (s *IssuesService) Unlock(owner string, repo string, number int) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%d/lock", owner, repo, number)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
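
A minimal usage sketch for the Issues service added above (not part of the vendored file): it assumes an authenticated `*github.Client`, and the repository name, issue title, and the `IssueRequest`/`Issue.Number` members declared elsewhere in `issues.go` are placeholders/assumptions.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	// An unauthenticated client can read public data; creating or locking
	// issues requires an authenticated http.Client (e.g. an oauth2 transport).
	client := github.NewClient(nil)

	// Create an issue. Title and Body are *string fields, so use github.String.
	req := &github.IssueRequest{
		Title: github.String("Found a bug"),
		Body:  github.String("Steps to reproduce: ..."),
	}
	issue, _, err := client.Issues.Create("octocat", "Hello-World", req)
	if err != nil {
		log.Fatal(err) // fails without authentication
	}
	fmt.Printf("created issue #%d\n", *issue.Number)

	// Lock the conversation on the freshly created issue.
	if _, err := client.Issues.Lock("octocat", "Hello-World", *issue.Number); err != nil {
		log.Fatal(err)
	}
}
```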
diff --git a/vendor/github.com/google/go-github/github/issues_assignees.go b/vendor/github.com/google/go-github/github/issues_assignees.go
new file mode 100644
index 0000000..2503be1
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/issues_assignees.go
@@ -0,0 +1,82 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// ListAssignees fetches all available assignees (owners and collaborators) to
+// which issues may be assigned.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/assignees/#list-assignees
+func (s *IssuesService) ListAssignees(owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/assignees", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ assignees := new([]*User)
+ resp, err := s.client.Do(req, assignees)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *assignees, resp, err
+}
+
+// IsAssignee checks if a user is an assignee for the specified repository.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/assignees/#check-assignee
+func (s *IssuesService) IsAssignee(owner, repo, user string) (bool, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/assignees/%v", owner, repo, user)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return false, nil, err
+ }
+ resp, err := s.client.Do(req, nil)
+ assignee, err := parseBoolResponse(err)
+ return assignee, resp, err
+}
+
+// AddAssignees adds the provided GitHub users as assignees to the issue.
+//
+// GitHub API docs: https://developer.github.com/v3/issues/assignees/#add-assignees-to-an-issue
+func (s *IssuesService) AddAssignees(owner, repo string, number int, assignees []string) (*Issue, *Response, error) {
+ users := &struct {
+ Assignees []string `json:"assignees,omitempty"`
+ }{Assignees: assignees}
+ u := fmt.Sprintf("repos/%v/%v/issues/%v/assignees", owner, repo, number)
+ req, err := s.client.NewRequest("POST", u, users)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ issue := &Issue{}
+ resp, err := s.client.Do(req, issue)
+ return issue, resp, err
+}
+
+// RemoveAssignees removes the provided GitHub users as assignees from the issue.
+//
+// GitHub API docs: https://developer.github.com/v3/issues/assignees/#remove-assignees-from-an-issue
+func (s *IssuesService) RemoveAssignees(owner, repo string, number int, assignees []string) (*Issue, *Response, error) {
+ users := &struct {
+ Assignees []string `json:"assignees,omitempty"`
+ }{Assignees: assignees}
+ u := fmt.Sprintf("repos/%v/%v/issues/%v/assignees", owner, repo, number)
+ req, err := s.client.NewRequest("DELETE", u, users)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ issue := &Issue{}
+ resp, err := s.client.Do(req, issue)
+ return issue, resp, err
+}
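
A hedged sketch of the assignee helpers defined above: the owner/repo names, the issue number 42, and the dereference of `Issue.Number` (declared outside this hunk) are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // adding assignees requires authentication

	// Check whether the user may be assigned issues in this repository.
	ok, _, err := client.Issues.IsAssignee("octocat", "Hello-World", "hubot")
	if err != nil {
		log.Fatal(err)
	}

	if ok {
		// Assign the user to issue #42 (placeholder number).
		issue, _, err := client.Issues.AddAssignees("octocat", "Hello-World", 42, []string{"hubot"})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("assignees added to issue", *issue.Number)
	}
}
```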
diff --git a/vendor/github.com/google/go-github/github/issues_comments.go b/vendor/github.com/google/go-github/github/issues_comments.go
new file mode 100644
index 0000000..b24c5ae
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/issues_comments.go
@@ -0,0 +1,147 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// IssueComment represents a comment left on an issue.
+type IssueComment struct {
+ ID *int `json:"id,omitempty"`
+ Body *string `json:"body,omitempty"`
+ User *User `json:"user,omitempty"`
+ Reactions *Reactions `json:"reactions,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ IssueURL *string `json:"issue_url,omitempty"`
+}
+
+func (i IssueComment) String() string {
+ return Stringify(i)
+}
+
+// IssueListCommentsOptions specifies the optional parameters to the
+// IssuesService.ListComments method.
+type IssueListCommentsOptions struct {
+ // Sort specifies how to sort comments. Possible values are: created, updated.
+ Sort string `url:"sort,omitempty"`
+
+ // Direction in which to sort comments. Possible values are: asc, desc.
+ Direction string `url:"direction,omitempty"`
+
+ // Since filters comments by time.
+ Since time.Time `url:"since,omitempty"`
+
+ ListOptions
+}
+
+// ListComments lists all comments on the specified issue. Specifying an issue
+// number of 0 will return all comments on all issues for the repository.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/comments/#list-comments-on-an-issue
+func (s *IssuesService) ListComments(owner string, repo string, number int, opt *IssueListCommentsOptions) ([]*IssueComment, *Response, error) {
+ var u string
+ if number == 0 {
+ u = fmt.Sprintf("repos/%v/%v/issues/comments", owner, repo)
+ } else {
+ u = fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number)
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ comments := new([]*IssueComment)
+ resp, err := s.client.Do(req, comments)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *comments, resp, err
+}
+
+// GetComment fetches the specified issue comment.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/comments/#get-a-single-comment
+func (s *IssuesService) GetComment(owner string, repo string, id int) (*IssueComment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ comment := new(IssueComment)
+ resp, err := s.client.Do(req, comment)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return comment, resp, err
+}
+
+// CreateComment creates a new comment on the specified issue.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/comments/#create-a-comment
+func (s *IssuesService) CreateComment(owner string, repo string, number int, comment *IssueComment) (*IssueComment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number)
+ req, err := s.client.NewRequest("POST", u, comment)
+ if err != nil {
+ return nil, nil, err
+ }
+ c := new(IssueComment)
+ resp, err := s.client.Do(req, c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
+
+// EditComment updates an issue comment.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/comments/#edit-a-comment
+func (s *IssuesService) EditComment(owner string, repo string, id int, comment *IssueComment) (*IssueComment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id)
+ req, err := s.client.NewRequest("PATCH", u, comment)
+ if err != nil {
+ return nil, nil, err
+ }
+ c := new(IssueComment)
+ resp, err := s.client.Do(req, c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
+
+// DeleteComment deletes an issue comment.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/comments/#delete-a-comment
+func (s *IssuesService) DeleteComment(owner string, repo string, id int) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
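
A short sketch of the comment methods above, assuming an authenticated client; the repository, issue number, and comment text are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // commenting requires authentication

	// Leave a comment on issue #42 (placeholder).
	comment := &github.IssueComment{Body: github.String("Thanks for the report!")}
	c, _, err := client.Issues.CreateComment("octocat", "Hello-World", 42, comment)
	if err != nil {
		log.Fatal(err)
	}

	// Amend the comment body using the ID returned by the create call.
	c.Body = github.String("Thanks for the report! Fixed in #43.")
	if _, _, err := client.Issues.EditComment("octocat", "Hello-World", *c.ID, c); err != nil {
		log.Fatal(err)
	}
	fmt.Println("updated comment", *c.ID)
}
```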
diff --git a/vendor/github.com/google/go-github/github/issues_events.go b/vendor/github.com/google/go-github/github/issues_events.go
new file mode 100644
index 0000000..71cf61a
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/issues_events.go
@@ -0,0 +1,149 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// IssueEvent represents an event that occurred around an Issue or Pull Request.
+type IssueEvent struct {
+ ID *int `json:"id,omitempty"`
+ URL *string `json:"url,omitempty"`
+
+ // The User that generated this event.
+ Actor *User `json:"actor,omitempty"`
+
+ // Event identifies the actual type of Event that occurred. Possible
+ // values are:
+ //
+ // closed
+ // The Actor closed the issue.
+ // If the issue was closed by commit message, CommitID holds the SHA1 hash of the commit.
+ //
+ // merged
+ // The Actor merged into master a branch containing a commit mentioning the issue.
+ // CommitID holds the SHA1 of the merge commit.
+ //
+ // referenced
+ // The Actor committed to master a commit mentioning the issue in its commit message.
+ // CommitID holds the SHA1 of the commit.
+ //
+ // reopened, locked, unlocked
+ // The Actor did that to the issue.
+ //
+ // renamed
+ // The Actor changed the issue title from Rename.From to Rename.To.
+ //
+ // mentioned
+ // Someone unspecified @mentioned the Actor [sic] in an issue comment body.
+ //
+ // assigned, unassigned
+ // The Actor assigned the issue to or removed the assignment from the Assignee.
+ //
+ // labeled, unlabeled
+ // The Actor added or removed the Label from the issue.
+ //
+ // milestoned, demilestoned
+ // The Actor added or removed the issue from the Milestone.
+ //
+ // subscribed, unsubscribed
+ // The Actor subscribed to or unsubscribed from notifications for an issue.
+ //
+ // head_ref_deleted, head_ref_restored
+ // The pull request’s branch was deleted or restored.
+ //
+ Event *string `json:"event,omitempty"`
+
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ Issue *Issue `json:"issue,omitempty"`
+
+ // Only present on certain events; see above.
+ Assignee *User `json:"assignee,omitempty"`
+ CommitID *string `json:"commit_id,omitempty"`
+ Milestone *Milestone `json:"milestone,omitempty"`
+ Label *Label `json:"label,omitempty"`
+ Rename *Rename `json:"rename,omitempty"`
+}
+
+// ListIssueEvents lists events for the specified issue.
+//
+// GitHub API docs: https://developer.github.com/v3/issues/events/#list-events-for-an-issue
+func (s *IssuesService) ListIssueEvents(owner, repo string, number int, opt *ListOptions) ([]*IssueEvent, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%v/events", owner, repo, number)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var events []*IssueEvent
+ resp, err := s.client.Do(req, &events)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return events, resp, err
+}
+
+// ListRepositoryEvents lists events for the specified repository.
+//
+// GitHub API docs: https://developer.github.com/v3/issues/events/#list-events-for-a-repository
+func (s *IssuesService) ListRepositoryEvents(owner, repo string, opt *ListOptions) ([]*IssueEvent, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var events []*IssueEvent
+ resp, err := s.client.Do(req, &events)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return events, resp, err
+}
+
+// GetEvent returns the specified issue event.
+//
+// GitHub API docs: https://developer.github.com/v3/issues/events/#get-a-single-event
+func (s *IssuesService) GetEvent(owner, repo string, id int) (*IssueEvent, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/events/%v", owner, repo, id)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ event := new(IssueEvent)
+ resp, err := s.client.Do(req, event)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return event, resp, err
+}
+
+// Rename contains details for 'renamed' events.
+type Rename struct {
+ From *string `json:"from,omitempty"`
+ To *string `json:"to,omitempty"`
+}
+
+func (r Rename) String() string {
+ return Stringify(r)
+}
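
A sketch of paging through the issue events listed by the methods above; the `ListOptions`/`Response.NextPage` pagination fields come from parts of the package not shown in this hunk, and the repository and issue number are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil)

	// Walk every event on issue #1347 (placeholder), following pagination
	// via Response.NextPage until the last page is reached.
	opt := &github.ListOptions{PerPage: 50}
	for {
		events, resp, err := client.Issues.ListIssueEvents("octocat", "Hello-World", 1347, opt)
		if err != nil {
			log.Fatal(err)
		}
		for _, e := range events {
			if e.Event != nil {
				fmt.Println("event:", *e.Event)
			}
		}
		if resp.NextPage == 0 {
			break
		}
		opt.Page = resp.NextPage
	}
}
```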
diff --git a/vendor/github.com/google/go-github/github/issues_labels.go b/vendor/github.com/google/go-github/github/issues_labels.go
new file mode 100644
index 0000000..c654547
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/issues_labels.go
@@ -0,0 +1,222 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// Label represents a GitHub label on an Issue
+type Label struct {
+ URL *string `json:"url,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Color *string `json:"color,omitempty"`
+}
+
+func (l Label) String() string {
+ return fmt.Sprint(*l.Name)
+}
+
+// ListLabels lists all labels for a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/labels/#list-all-labels-for-this-repository
+func (s *IssuesService) ListLabels(owner string, repo string, opt *ListOptions) ([]*Label, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/labels", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ labels := new([]*Label)
+ resp, err := s.client.Do(req, labels)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *labels, resp, err
+}
+
+// GetLabel gets a single label.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/labels/#get-a-single-label
+func (s *IssuesService) GetLabel(owner string, repo string, name string) (*Label, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ label := new(Label)
+ resp, err := s.client.Do(req, label)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return label, resp, err
+}
+
+// CreateLabel creates a new label on the specified repository.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/labels/#create-a-label
+func (s *IssuesService) CreateLabel(owner string, repo string, label *Label) (*Label, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/labels", owner, repo)
+ req, err := s.client.NewRequest("POST", u, label)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ l := new(Label)
+ resp, err := s.client.Do(req, l)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return l, resp, err
+}
+
+// EditLabel edits a label.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/labels/#update-a-label
+func (s *IssuesService) EditLabel(owner string, repo string, name string, label *Label) (*Label, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name)
+ req, err := s.client.NewRequest("PATCH", u, label)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ l := new(Label)
+ resp, err := s.client.Do(req, l)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return l, resp, err
+}
+
+// DeleteLabel deletes a label.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/labels/#delete-a-label
+func (s *IssuesService) DeleteLabel(owner string, repo string, name string) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
+
+// ListLabelsByIssue lists all labels for an issue.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/labels/#list-labels-on-an-issue
+func (s *IssuesService) ListLabelsByIssue(owner string, repo string, number int, opt *ListOptions) ([]*Label, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ labels := new([]*Label)
+ resp, err := s.client.Do(req, labels)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *labels, resp, err
+}
+
+// AddLabelsToIssue adds labels to an issue.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/labels/#add-labels-to-an-issue
+func (s *IssuesService) AddLabelsToIssue(owner string, repo string, number int, labels []string) ([]*Label, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number)
+ req, err := s.client.NewRequest("POST", u, labels)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ l := new([]*Label)
+ resp, err := s.client.Do(req, l)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *l, resp, err
+}
+
+// RemoveLabelForIssue removes a label for an issue.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/labels/#remove-a-label-from-an-issue
+func (s *IssuesService) RemoveLabelForIssue(owner string, repo string, number int, label string) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%d/labels/%v", owner, repo, number, label)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
+
+// ReplaceLabelsForIssue replaces all labels for an issue.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/labels/#replace-all-labels-for-an-issue
+func (s *IssuesService) ReplaceLabelsForIssue(owner string, repo string, number int, labels []string) ([]*Label, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number)
+ req, err := s.client.NewRequest("PUT", u, labels)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ l := new([]*Label)
+ resp, err := s.client.Do(req, l)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *l, resp, err
+}
+
+// RemoveLabelsForIssue removes all labels for an issue.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/labels/#remove-all-labels-from-an-issue
+func (s *IssuesService) RemoveLabelsForIssue(owner string, repo string, number int) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
+
+// ListLabelsForMilestone lists labels for every issue in a milestone.
+//
+// GitHub API docs: http://developer.github.com/v3/issues/labels/#get-labels-for-every-issue-in-a-milestone
+func (s *IssuesService) ListLabelsForMilestone(owner string, repo string, number int, opt *ListOptions) ([]*Label, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/milestones/%d/labels", owner, repo, number)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ labels := new([]*Label)
+ resp, err := s.client.Do(req, labels)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *labels, resp, err
+}
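
A minimal sketch of the label methods above, assuming an authenticated client; the label name, color, repository, and issue number are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // label management requires authentication

	// Create a repository label; Color is a hex string without the leading '#'.
	label := &github.Label{
		Name:  github.String("needs-triage"),
		Color: github.String("fbca04"),
	}
	if _, _, err := client.Issues.CreateLabel("octocat", "Hello-World", label); err != nil {
		log.Fatal(err)
	}

	// Attach the label to issue #42 (placeholder); the call returns the
	// issue's full label set after the addition.
	labels, _, err := client.Issues.AddLabelsToIssue("octocat", "Hello-World", 42, []string{"needs-triage"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("issue now carries %d label(s)\n", len(labels))
}
```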
diff --git a/vendor/github.com/google/go-github/github/issues_milestones.go b/vendor/github.com/google/go-github/github/issues_milestones.go
new file mode 100644
index 0000000..b7621ac
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/issues_milestones.go
@@ -0,0 +1,146 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// Milestone represents a GitHub repository milestone.
+type Milestone struct {
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ LabelsURL *string `json:"labels_url,omitempty"`
+ ID *int `json:"id,omitempty"`
+ Number *int `json:"number,omitempty"`
+ State *string `json:"state,omitempty"`
+ Title *string `json:"title,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Creator *User `json:"creator,omitempty"`
+ OpenIssues *int `json:"open_issues,omitempty"`
+ ClosedIssues *int `json:"closed_issues,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+ ClosedAt *time.Time `json:"closed_at,omitempty"`
+ DueOn *time.Time `json:"due_on,omitempty"`
+}
+
+func (m Milestone) String() string {
+ return Stringify(m)
+}
+
+// MilestoneListOptions specifies the optional parameters to the
+// IssuesService.ListMilestones method.
+type MilestoneListOptions struct {
+ // State filters milestones based on their state. Possible values are:
+ // open, closed. Default is "open".
+ State string `url:"state,omitempty"`
+
+ // Sort specifies how to sort milestones. Possible values are: due_date, completeness.
+ // Default value is "due_date".
+ Sort string `url:"sort,omitempty"`
+
+ // Direction in which to sort milestones. Possible values are: asc, desc.
+ // Default is "asc".
+ Direction string `url:"direction,omitempty"`
+
+ ListOptions
+}
+
+// ListMilestones lists all milestones for a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/issues/milestones/#list-milestones-for-a-repository
+func (s *IssuesService) ListMilestones(owner string, repo string, opt *MilestoneListOptions) ([]*Milestone, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ milestones := new([]*Milestone)
+ resp, err := s.client.Do(req, milestones)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *milestones, resp, err
+}
+
+// GetMilestone gets a single milestone.
+//
+// GitHub API docs: https://developer.github.com/v3/issues/milestones/#get-a-single-milestone
+func (s *IssuesService) GetMilestone(owner string, repo string, number int) (*Milestone, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ milestone := new(Milestone)
+ resp, err := s.client.Do(req, milestone)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return milestone, resp, err
+}
+
+// CreateMilestone creates a new milestone on the specified repository.
+//
+// GitHub API docs: https://developer.github.com/v3/issues/milestones/#create-a-milestone
+func (s *IssuesService) CreateMilestone(owner string, repo string, milestone *Milestone) (*Milestone, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo)
+ req, err := s.client.NewRequest("POST", u, milestone)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ m := new(Milestone)
+ resp, err := s.client.Do(req, m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, err
+}
+
+// EditMilestone edits a milestone.
+//
+// GitHub API docs: https://developer.github.com/v3/issues/milestones/#update-a-milestone
+func (s *IssuesService) EditMilestone(owner string, repo string, number int, milestone *Milestone) (*Milestone, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number)
+ req, err := s.client.NewRequest("PATCH", u, milestone)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ m := new(Milestone)
+ resp, err := s.client.Do(req, m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, err
+}
+
+// DeleteMilestone deletes a milestone.
+//
+// GitHub API docs: https://developer.github.com/v3/issues/milestones/#delete-a-milestone
+func (s *IssuesService) DeleteMilestone(owner string, repo string, number int) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
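
A hedged sketch of creating and listing milestones with the methods above; the repository, title, and due date are placeholders, and an authenticated client is assumed.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // milestone management requires authentication

	// Create a milestone with a due date one month out.
	due := time.Now().AddDate(0, 1, 0)
	m := &github.Milestone{
		Title:       github.String("v1.0"),
		Description: github.String("First stable release"),
		DueOn:       &due,
	}
	created, _, err := client.Issues.CreateMilestone("octocat", "Hello-World", m)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("milestone #%d created\n", *created.Number)

	// List open milestones, sorted by due date (the API default).
	milestones, _, err := client.Issues.ListMilestones("octocat", "Hello-World", &github.MilestoneListOptions{State: "open"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d open milestone(s)\n", len(milestones))
}
```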
diff --git a/vendor/github.com/google/go-github/github/issues_timeline.go b/vendor/github.com/google/go-github/github/issues_timeline.go
new file mode 100644
index 0000000..d20eef8
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/issues_timeline.go
@@ -0,0 +1,148 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// Timeline represents an event that occurred around an Issue or Pull Request.
+//
+// It is similar to an IssueEvent but may contain more information.
+// GitHub API docs: https://developer.github.com/v3/issues/timeline/
+type Timeline struct {
+ ID *int `json:"id,omitempty"`
+ URL *string `json:"url,omitempty"`
+ CommitURL *string `json:"commit_url,omitempty"`
+
+ // The User object that generated the event.
+ Actor *User `json:"actor,omitempty"`
+
+ // Event identifies the actual type of Event that occurred. Possible values
+ // are:
+ //
+ // assigned
+ // The issue was assigned to the assignee.
+ //
+ // closed
+ // The issue was closed by the actor. When the commit_id is present, it
+ // identifies the commit that closed the issue using "closes / fixes #NN"
+ // syntax.
+ //
+ // commented
+ // A comment was added to the issue.
+ //
+ // committed
+ // A commit was added to the pull request's 'HEAD' branch. Only provided
+ // for pull requests.
+ //
+ // cross-referenced
+ // The issue was referenced from another issue. The 'source' attribute
+ // contains the 'id', 'actor', and 'url' of the reference's source.
+ //
+ // demilestoned
+ // The issue was removed from a milestone.
+ //
+ // head_ref_deleted
+ // The pull request's branch was deleted.
+ //
+ // head_ref_restored
+ // The pull request's branch was restored.
+ //
+ // labeled
+ // A label was added to the issue.
+ //
+ // locked
+ // The issue was locked by the actor.
+ //
+ // mentioned
+ // The actor was @mentioned in an issue body.
+ //
+ // merged
+ // The issue was merged by the actor. The 'commit_id' attribute is the
+ // SHA1 of the HEAD commit that was merged.
+ //
+ // milestoned
+ // The issue was added to a milestone.
+ //
+ // referenced
+ // The issue was referenced from a commit message. The 'commit_id'
+ // attribute is the commit SHA1 of where that happened.
+ //
+ // renamed
+ // The issue title was changed.
+ //
+ // reopened
+ // The issue was reopened by the actor.
+ //
+ // subscribed
+ // The actor subscribed to receive notifications for an issue.
+ //
+ // unassigned
+ // The assignee was unassigned from the issue.
+ //
+ // unlabeled
+ // A label was removed from the issue.
+ //
+ // unlocked
+ // The issue was unlocked by the actor.
+ //
+ // unsubscribed
+ // The actor unsubscribed to stop receiving notifications for an issue.
+ //
+ Event *string `json:"event,omitempty"`
+
+ // The string SHA of a commit that referenced this Issue or Pull Request.
+ CommitID *string `json:"commit_id,omitempty"`
+ // The timestamp indicating when the event occurred.
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ // The Label object including `name` and `color` attributes. Only provided for
+ // 'labeled' and 'unlabeled' events.
+ Label *Label `json:"label,omitempty"`
+ // The User object which was assigned to (or unassigned from) this Issue or
+ // Pull Request. Only provided for 'assigned' and 'unassigned' events.
+ Assignee *User `json:"assignee,omitempty"`
+ // The Milestone object including a 'title' attribute.
+ // Only provided for 'milestoned' and 'demilestoned' events.
+ Milestone *Milestone `json:"milestone,omitempty"`
+ // The 'id', 'actor', and 'url' for the source of a reference from another issue.
+ // Only provided for 'cross-referenced' events.
+ Source *Source `json:"source,omitempty"`
+ // An object containing rename details including 'from' and 'to' attributes.
+ // Only provided for 'renamed' events.
+ Rename *Rename `json:"rename,omitempty"`
+}
+
+// Source represents a reference's source.
+type Source struct {
+ ID *int `json:"id,omitempty"`
+ URL *string `json:"url,omitempty"`
+ Actor *User `json:"actor,omitempty"`
+}
+
+// ListIssueTimeline lists events for the specified issue.
+//
+// GitHub API docs: https://developer.github.com/v3/issues/timeline/#list-events-for-an-issue
+func (s *IssuesService) ListIssueTimeline(owner, repo string, number int, opt *ListOptions) ([]*Timeline, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%v/timeline", owner, repo, number)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeTimelinePreview)
+
+ var events []*Timeline
+ resp, err := s.client.Do(req, &events)
+ return events, resp, err
+}
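
A sketch of consuming the timeline preview API above; `ListIssueTimeline` already sets the preview Accept header, so callers only page through results. The repository and issue number are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil)

	// Page through the timeline of issue #1347 (placeholder).
	opt := &github.ListOptions{PerPage: 100}
	for {
		timeline, resp, err := client.Issues.ListIssueTimeline("octocat", "Hello-World", 1347, opt)
		if err != nil {
			log.Fatal(err)
		}
		for _, t := range timeline {
			if t.Event != nil {
				fmt.Println("timeline event:", *t.Event)
			}
		}
		if resp.NextPage == 0 {
			break
		}
		opt.Page = resp.NextPage
	}
}
```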
diff --git a/vendor/github.com/google/go-github/github/licenses.go b/vendor/github.com/google/go-github/github/licenses.go
new file mode 100644
index 0000000..0b5e8b3
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/licenses.go
@@ -0,0 +1,100 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// LicensesService handles communication with the license related
+// methods of the GitHub API.
+//
+// GitHub API docs: https://developer.github.com/v3/licenses/
+type LicensesService service
+
+// RepositoryLicense represents the license for a repository.
+type RepositoryLicense struct {
+ Name *string `json:"name,omitempty"`
+ Path *string `json:"path,omitempty"`
+
+ SHA *string `json:"sha,omitempty"`
+ Size *int `json:"size,omitempty"`
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ GitURL *string `json:"git_url,omitempty"`
+ DownloadURL *string `json:"download_url,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Content *string `json:"content,omitempty"`
+ Encoding *string `json:"encoding,omitempty"`
+ License *License `json:"license,omitempty"`
+}
+
+func (l RepositoryLicense) String() string {
+ return Stringify(l)
+}
+
+// License represents an open source license.
+type License struct {
+ Key *string `json:"key,omitempty"`
+ Name *string `json:"name,omitempty"`
+ URL *string `json:"url,omitempty"`
+
+ SPDXID *string `json:"spdx_id,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ Featured *bool `json:"featured,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Implementation *string `json:"implementation,omitempty"`
+ Permissions *[]string `json:"permissions,omitempty"`
+ Conditions *[]string `json:"conditions,omitempty"`
+ Limitations *[]string `json:"limitations,omitempty"`
+ Body *string `json:"body,omitempty"`
+}
+
+func (l License) String() string {
+ return Stringify(l)
+}
+
+// List popular open source licenses.
+//
+// GitHub API docs: https://developer.github.com/v3/licenses/#list-all-licenses
+func (s *LicensesService) List() ([]*License, *Response, error) {
+ req, err := s.client.NewRequest("GET", "licenses", nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeLicensesPreview)
+
+ licenses := new([]*License)
+ resp, err := s.client.Do(req, licenses)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *licenses, resp, err
+}
+
+// Get extended metadata for one license.
+//
+// GitHub API docs: https://developer.github.com/v3/licenses/#get-an-individual-license
+func (s *LicensesService) Get(licenseName string) (*License, *Response, error) {
+ u := fmt.Sprintf("licenses/%s", licenseName)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeLicensesPreview)
+
+ license := new(License)
+ resp, err := s.client.Do(req, license)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return license, resp, err
+}
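
A minimal sketch of the Licenses service above; these endpoints work unauthenticated, and only the `"mit"` license key is assumed for illustration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil)

	// List the commonly used licenses GitHub knows about.
	licenses, _, err := client.Licenses.List()
	if err != nil {
		log.Fatal(err)
	}
	for _, l := range licenses {
		fmt.Printf("%s (%s)\n", *l.Name, *l.Key)
	}

	// Fetch the full metadata (including the license body) for one license.
	mit, _, err := client.Licenses.Get("mit")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(*mit.Body), "bytes of license text")
}
```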
diff --git a/vendor/github.com/google/go-github/github/messages.go b/vendor/github.com/google/go-github/github/messages.go
new file mode 100644
index 0000000..5f67ba5
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/messages.go
@@ -0,0 +1,195 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides functions for validating payloads from GitHub Webhooks.
+// GitHub docs: https://developer.github.com/webhooks/securing/#validating-payloads-from-github
+
+package github
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "hash"
+ "io/ioutil"
+ "net/http"
+ "strings"
+)
+
+const (
+ // sha1Prefix is the prefix used by GitHub before the HMAC hexdigest.
+ sha1Prefix = "sha1"
+ // sha256Prefix and sha512Prefix are provided for future compatibility.
+ sha256Prefix = "sha256"
+ sha512Prefix = "sha512"
+ // signatureHeader is the GitHub header key used to pass the HMAC hexdigest.
+ signatureHeader = "X-Hub-Signature"
+ // eventTypeHeader is the Github header key used to pass the event type.
+ eventTypeHeader = "X-Github-Event"
+)
+
+var (
+ // eventTypeMapping maps webhooks types to their corresponding go-github struct types.
+ eventTypeMapping = map[string]string{
+ "commit_comment": "CommitCommentEvent",
+ "create": "CreateEvent",
+ "delete": "DeleteEvent",
+ "deployment": "DeploymentEvent",
+ "deployment_status": "DeploymentStatusEvent",
+ "fork": "ForkEvent",
+ "gollum": "GollumEvent",
+ "integration_installation": "IntegrationInstallationEvent",
+ "integration_installation_repositories": "IntegrationInstallationRepositoriesEvent",
+ "issue_comment": "IssueCommentEvent",
+ "issues": "IssuesEvent",
+ "label": "LabelEvent",
+ "member": "MemberEvent",
+ "membership": "MembershipEvent",
+ "milestone": "MilestoneEvent",
+ "organization": "OrganizationEvent",
+ "page_build": "PageBuildEvent",
+ "ping": "PingEvent",
+ "public": "PublicEvent",
+ "pull_request_review": "PullRequestReviewEvent",
+ "pull_request_review_comment": "PullRequestReviewCommentEvent",
+ "pull_request": "PullRequestEvent",
+ "push": "PushEvent",
+ "repository": "RepositoryEvent",
+ "release": "ReleaseEvent",
+ "status": "StatusEvent",
+ "team_add": "TeamAddEvent",
+ "watch": "WatchEvent",
+ }
+)
+
+// genMAC generates the HMAC signature for a message provided the secret key
+// and hashFunc.
+func genMAC(message, key []byte, hashFunc func() hash.Hash) []byte {
+ mac := hmac.New(hashFunc, key)
+ mac.Write(message)
+ return mac.Sum(nil)
+}
+
+// checkMAC reports whether messageMAC is a valid HMAC tag for message.
+func checkMAC(message, messageMAC, key []byte, hashFunc func() hash.Hash) bool {
+ expectedMAC := genMAC(message, key, hashFunc)
+ return hmac.Equal(messageMAC, expectedMAC)
+}
+
+// messageMAC returns the hex-decoded HMAC tag from the signature and its
+// corresponding hash function.
+func messageMAC(signature string) ([]byte, func() hash.Hash, error) {
+ if signature == "" {
+ return nil, nil, errors.New("missing signature")
+ }
+ sigParts := strings.SplitN(signature, "=", 2)
+ if len(sigParts) != 2 {
+ return nil, nil, fmt.Errorf("error parsing signature %q", signature)
+ }
+
+ var hashFunc func() hash.Hash
+ switch sigParts[0] {
+ case sha1Prefix:
+ hashFunc = sha1.New
+ case sha256Prefix:
+ hashFunc = sha256.New
+ case sha512Prefix:
+ hashFunc = sha512.New
+ default:
+ return nil, nil, fmt.Errorf("unknown hash type prefix: %q", sigParts[0])
+ }
+
+ buf, err := hex.DecodeString(sigParts[1])
+ if err != nil {
+ return nil, nil, fmt.Errorf("error decoding signature %q: %v", signature, err)
+ }
+ return buf, hashFunc, nil
+}
+
+// ValidatePayload validates an incoming GitHub Webhook event request
+// and returns the (JSON) payload.
+// secretKey is the secret configured for the GitHub Webhook.
+//
+// Example usage:
+//
+// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+// payload, err := github.ValidatePayload(r, s.webhookSecretKey)
+// if err != nil { ... }
+// // Process payload...
+// }
+//
+func ValidatePayload(r *http.Request, secretKey []byte) (payload []byte, err error) {
+ payload, err = ioutil.ReadAll(r.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ sig := r.Header.Get(signatureHeader)
+ if err := validateSignature(sig, payload, secretKey); err != nil {
+ return nil, err
+ }
+ return payload, nil
+}
+
+// validateSignature validates the signature for the given payload.
+// signature is the GitHub hash signature delivered in the X-Hub-Signature header.
+// payload is the JSON payload sent by GitHub Webhooks.
+// secretKey is the secret configured for the GitHub Webhook.
+//
+// GitHub docs: https://developer.github.com/webhooks/securing/#validating-payloads-from-github
+func validateSignature(signature string, payload, secretKey []byte) error {
+ messageMAC, hashFunc, err := messageMAC(signature)
+ if err != nil {
+ return err
+ }
+ if !checkMAC(payload, messageMAC, secretKey, hashFunc) {
+ return errors.New("payload signature check failed")
+ }
+ return nil
+}
+
+// WebHookType returns the event type of webhook request r.
+func WebHookType(r *http.Request) string {
+ return r.Header.Get(eventTypeHeader)
+}
+
+// ParseWebHook parses the event payload. For recognized event types, a
+// value of the corresponding struct type will be returned (as returned
+// by Event.Payload()). An error will be returned for unrecognized event
+// types.
+//
+// Example usage:
+//
+// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+// payload, err := github.ValidatePayload(r, s.webhookSecretKey)
+// if err != nil { ... }
+// event, err := github.ParseWebHook(github.WebHookType(r), payload)
+// if err != nil { ... }
+// switch event := event.(type) {
+// case *github.CommitCommentEvent:
+// processCommitCommentEvent(event)
+// case *github.CreateEvent:
+// processCreateEvent(event)
+// ...
+// }
+// }
+//
+func ParseWebHook(messageType string, payload []byte) (interface{}, error) {
+ eventType, ok := eventTypeMapping[messageType]
+ if !ok {
+ return nil, fmt.Errorf("unknown X-Github-Event in message: %v", messageType)
+ }
+
+ event := Event{
+ Type: &eventType,
+ RawPayload: (*json.RawMessage)(&payload),
+ }
+ return event.Payload(), nil
+}
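
A fuller version of the usage outlined in the doc comments above: a small HTTP handler that validates the payload and dispatches on the parsed event type. The listen address, route, and secret are placeholders.

```go
package main

import (
	"log"
	"net/http"

	"github.com/google/go-github/github"
)

// webhookSecret must match the secret configured on the GitHub webhook.
var webhookSecret = []byte("placeholder-secret")

func handleWebhook(w http.ResponseWriter, r *http.Request) {
	// ValidatePayload reads the body and verifies the X-Hub-Signature HMAC.
	payload, err := github.ValidatePayload(r, webhookSecret)
	if err != nil {
		http.Error(w, "invalid signature", http.StatusForbidden)
		return
	}

	// Dispatch on the concrete event type returned by ParseWebHook.
	event, err := github.ParseWebHook(github.WebHookType(r), payload)
	if err != nil {
		http.Error(w, "unsupported event", http.StatusBadRequest)
		return
	}
	switch e := event.(type) {
	case *github.IssuesEvent:
		log.Printf("issue event: %s", *e.Action)
	case *github.PushEvent:
		log.Printf("push to %s", *e.Ref)
	default:
		log.Printf("ignoring %T", e)
	}
}

func main() {
	http.HandleFunc("/webhook", handleWebhook)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```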
diff --git a/vendor/github.com/google/go-github/github/migrations.go b/vendor/github.com/google/go-github/github/migrations.go
new file mode 100644
index 0000000..a7890b0
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/migrations.go
@@ -0,0 +1,223 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+)
+
+// MigrationService provides access to the migration related functions
+// in the GitHub API.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/
+type MigrationService service
+
+// Migration represents a GitHub migration (archival).
+type Migration struct {
+ ID *int `json:"id,omitempty"`
+ GUID *string `json:"guid,omitempty"`
+ // State is the current state of a migration.
+ // Possible values are:
+ // "pending" which means the migration hasn't started yet,
+ // "exporting" which means the migration is in progress,
+ // "exported" which means the migration finished successfully, or
+ // "failed" which means the migration failed.
+ State *string `json:"state,omitempty"`
+ // LockRepositories indicates whether repositories are locked (to prevent
+ // manipulation) while migrating data.
+ LockRepositories *bool `json:"lock_repositories,omitempty"`
+ // ExcludeAttachments indicates whether attachments should be excluded from
+ // the migration (to reduce migration archive file size).
+ ExcludeAttachments *bool `json:"exclude_attachments,omitempty"`
+ URL *string `json:"url,omitempty"`
+ CreatedAt *string `json:"created_at,omitempty"`
+ UpdatedAt *string `json:"updated_at,omitempty"`
+ Repositories []*Repository `json:"repositories,omitempty"`
+}
+
+func (m Migration) String() string {
+ return Stringify(m)
+}
+
+// MigrationOptions specifies the optional parameters to Migration methods.
+type MigrationOptions struct {
+ // LockRepositories indicates whether repositories should be locked (to prevent
+ // manipulation) while migrating data.
+ LockRepositories bool
+
+ // ExcludeAttachments indicates whether attachments should be excluded from
+ // the migration (to reduce migration archive file size).
+ ExcludeAttachments bool
+}
+
+// startMigration represents the body of a StartMigration request.
+type startMigration struct {
+ // Repositories is a slice of repository names to migrate.
+ Repositories []string `json:"repositories,omitempty"`
+
+ // LockRepositories indicates whether repositories should be locked (to prevent
+ // manipulation) while migrating data.
+ LockRepositories *bool `json:"lock_repositories,omitempty"`
+
+ // ExcludeAttachments indicates whether attachments should be excluded from
+ // the migration (to reduce migration archive file size).
+ ExcludeAttachments *bool `json:"exclude_attachments,omitempty"`
+}
+
+// StartMigration starts the generation of a migration archive.
+// repos is a slice of repository names to migrate.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/migrations/#start-a-migration
+func (s *MigrationService) StartMigration(org string, repos []string, opt *MigrationOptions) (*Migration, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/migrations", org)
+
+ body := &startMigration{Repositories: repos}
+ if opt != nil {
+ body.LockRepositories = Bool(opt.LockRepositories)
+ body.ExcludeAttachments = Bool(opt.ExcludeAttachments)
+ }
+
+ req, err := s.client.NewRequest("POST", u, body)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeMigrationsPreview)
+
+ m := &Migration{}
+ resp, err := s.client.Do(req, m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, nil
+}
+
+// ListMigrations lists the most recent migrations.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/migrations/#get-a-list-of-migrations
+func (s *MigrationService) ListMigrations(org string) ([]*Migration, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/migrations", org)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeMigrationsPreview)
+
+ var m []*Migration
+ resp, err := s.client.Do(req, &m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, nil
+}
+
+// MigrationStatus gets the status of a specific migration archive.
+// id is the migration ID.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/migrations/#get-the-status-of-a-migration
+func (s *MigrationService) MigrationStatus(org string, id int) (*Migration, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/migrations/%v", org, id)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeMigrationsPreview)
+
+ m := &Migration{}
+ resp, err := s.client.Do(req, m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, nil
+}
+
+// MigrationArchiveURL fetches a migration archive URL.
+// id is the migration ID.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/migrations/#download-a-migration-archive
+func (s *MigrationService) MigrationArchiveURL(org string, id int) (url string, err error) {
+ u := fmt.Sprintf("orgs/%v/migrations/%v/archive", org, id)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return "", err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeMigrationsPreview)
+
+ s.client.clientMu.Lock()
+ defer s.client.clientMu.Unlock()
+
+ // Disable the redirect mechanism because AWS fails if the GitHub auth token is provided.
+ var loc string
+ saveRedirect := s.client.client.CheckRedirect
+ s.client.client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ loc = req.URL.String()
+ return errors.New("disable redirect")
+ }
+ defer func() { s.client.client.CheckRedirect = saveRedirect }()
+
+ _, err = s.client.Do(req, nil) // expect error from disable redirect
+ if err == nil {
+ return "", errors.New("expected redirect, none provided")
+ }
+ if !strings.Contains(err.Error(), "disable redirect") {
+ return "", err
+ }
+ return loc, nil
+}
+
+// DeleteMigration deletes a previous migration archive.
+// id is the migration ID.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/migrations/#delete-a-migration-archive
+func (s *MigrationService) DeleteMigration(org string, id int) (*Response, error) {
+ u := fmt.Sprintf("orgs/%v/migrations/%v/archive", org, id)
+
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeMigrationsPreview)
+
+ return s.client.Do(req, nil)
+}
+
+// UnlockRepo unlocks a repository that was locked for migration.
+// id is the migration ID.
+// You should unlock each migrated repository and delete it when the migration
+// is complete and you no longer need the source data.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/migrations/#unlock-a-repository
+func (s *MigrationService) UnlockRepo(org string, id int, repo string) (*Response, error) {
+ u := fmt.Sprintf("orgs/%v/migrations/%v/repos/%v/lock", org, id, repo)
+
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeMigrationsPreview)
+
+ return s.client.Do(req, nil)
+}
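
A sketch of driving the migration archive workflow defined above: start an export, poll its status, and fetch the archive URL. The organization and repository names are placeholders, and an org-admin token is assumed.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // migrations require an authenticated org admin token

	// Kick off an archive for two repositories, locking them while exporting.
	opt := &github.MigrationOptions{LockRepositories: true}
	m, _, err := client.Migrations.StartMigration("octo-org", []string{"repo-one", "repo-two"}, opt)
	if err != nil {
		log.Fatal(err)
	}

	// Poll until the archive is ready, then fetch its download URL.
	// (A production caller would also bail out on the "failed" state.)
	for *m.State != "exported" {
		time.Sleep(10 * time.Second)
		if m, _, err = client.Migrations.MigrationStatus("octo-org", *m.ID); err != nil {
			log.Fatal(err)
		}
	}
	url, err := client.Migrations.MigrationArchiveURL("octo-org", *m.ID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("archive available at", url)
}
```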
diff --git a/vendor/github.com/google/go-github/github/migrations_source_import.go b/vendor/github.com/google/go-github/github/migrations_source_import.go
new file mode 100644
index 0000000..44505fa
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/migrations_source_import.go
@@ -0,0 +1,326 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// Import represents a repository import request.
+type Import struct {
+ // The URL of the originating repository.
+ VCSURL *string `json:"vcs_url,omitempty"`
+ // The originating VCS type. Can be one of 'subversion', 'git',
+ // 'mercurial', or 'tfvc'. Without this parameter, the import job will
+ // take additional time to detect the VCS type before beginning the
+ // import. This detection step will be reflected in the response.
+ VCS *string `json:"vcs,omitempty"`
+ // VCSUsername and VCSPassword are only used for StartImport calls that
+ // are importing a password-protected repository.
+ VCSUsername *string `json:"vcs_username,omitempty"`
+ VCSPassword *string `json:"vcs_password,omitempty"`
+ // For a tfvc import, the name of the project that is being imported.
+ TFVCProject *string `json:"tfvc_project,omitempty"`
+
+ // LFS related fields that may be preset in the Import Progress response
+
+ // Describes whether the import has been opted in or out of using Git
+ // LFS. The value can be 'opt_in', 'opt_out', or 'undecided' if no
+ // action has been taken.
+ UseLFS *string `json:"use_lfs,omitempty"`
+ // Describes whether files larger than 100MB were found during the
+ // importing step.
+ HasLargeFiles *bool `json:"has_large_files,omitempty"`
+ // The total size in gigabytes of files larger than 100MB found in the
+ // originating repository.
+ LargeFilesSize *int `json:"large_files_size,omitempty"`
+ // The total number of files larger than 100MB found in the originating
+ // repository. To see a list of these files, call LargeFiles.
+ LargeFilesCount *int `json:"large_files_count,omitempty"`
+
+ // Identifies the current status of an import. An import that does not
+ // have errors will progress through these steps:
+ //
+ // detecting - the "detection" step of the import is in progress
+ // because the request did not include a VCS parameter. The
+ // import is identifying the type of source control present at
+ // the URL.
+ // importing - the "raw" step of the import is in progress. This is
+ // where commit data is fetched from the original repository.
+ // The import progress response will include CommitCount (the
+ // total number of raw commits that will be imported) and
+ // Percent (0 - 100, the current progress through the import).
+ // mapping - the "rewrite" step of the import is in progress. This
+ // is where SVN branches are converted to Git branches, and
+ // where author updates are applied. The import progress
+ // response does not include progress information.
+ // pushing - the "push" step of the import is in progress. This is
+ // where the importer updates the repository on GitHub. The
+ // import progress response will include PushPercent, which is
+ // the percent value reported by git push when it is "Writing
+ // objects".
+ // complete - the import is complete, and the repository is ready
+ // on GitHub.
+ //
+ // If there are problems, you will see one of these in the status field:
+ //
+ // auth_failed - the import requires authentication in order to
+ // connect to the original repository. Make an UpdateImport
+ // request, and include VCSUsername and VCSPassword.
+ // error - the import encountered an error. The import progress
+ // response will include the FailedStep and an error message.
+ // Contact GitHub support for more information.
+ // detection_needs_auth - the importer requires authentication for
+ // the originating repository to continue detection. Make an
+	//     UpdateImport request, and include VCSUsername and
+ // VCSPassword.
+ // detection_found_nothing - the importer didn't recognize any
+ // source control at the URL.
+ // detection_found_multiple - the importer found several projects
+ // or repositories at the provided URL. When this is the case,
+ // the Import Progress response will also include a
+ // ProjectChoices field with the possible project choices as
+ // values. Make an UpdateImport request, and include VCS and
+ // (if applicable) TFVCProject.
+ Status *string `json:"status,omitempty"`
+ CommitCount *int `json:"commit_count,omitempty"`
+ StatusText *string `json:"status_text,omitempty"`
+ AuthorsCount *int `json:"authors_count,omitempty"`
+ Percent *int `json:"percent,omitempty"`
+ PushPercent *int `json:"push_percent,omitempty"`
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ AuthorsURL *string `json:"authors_url,omitempty"`
+ RepositoryURL *string `json:"repository_url,omitempty"`
+ Message *string `json:"message,omitempty"`
+ FailedStep *string `json:"failed_step,omitempty"`
+
+ // Human readable display name, provided when the Import appears as
+ // part of ProjectChoices.
+ HumanName *string `json:"human_name,omitempty"`
+
+ // When the importer finds several projects or repositories at the
+ // provided URLs, this will identify the available choices. Call
+ // UpdateImport with the selected Import value.
+ ProjectChoices []Import `json:"project_choices,omitempty"`
+}
+
+func (i Import) String() string {
+ return Stringify(i)
+}
+
+// SourceImportAuthor identifies an author imported from a source repository.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-commit-authors
+type SourceImportAuthor struct {
+ ID *int `json:"id,omitempty"`
+ RemoteID *string `json:"remote_id,omitempty"`
+ RemoteName *string `json:"remote_name,omitempty"`
+ Email *string `json:"email,omitempty"`
+ Name *string `json:"name,omitempty"`
+ URL *string `json:"url,omitempty"`
+ ImportURL *string `json:"import_url,omitempty"`
+}
+
+func (a SourceImportAuthor) String() string {
+ return Stringify(a)
+}
+
+// LargeFile identifies a file larger than 100MB found during a repository import.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-large-files
+type LargeFile struct {
+ RefName *string `json:"ref_name,omitempty"`
+ Path *string `json:"path,omitempty"`
+ OID *string `json:"oid,omitempty"`
+ Size *int `json:"size,omitempty"`
+}
+
+func (f LargeFile) String() string {
+ return Stringify(f)
+}
+
+// StartImport initiates a repository import.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#start-an-import
+func (s *MigrationService) StartImport(owner, repo string, in *Import) (*Import, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
+ req, err := s.client.NewRequest("PUT", u, in)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeImportPreview)
+
+ out := new(Import)
+ resp, err := s.client.Do(req, out)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return out, resp, err
+}
+
+// ImportProgress queries for the status and progress of an ongoing repository import.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-import-progress
+func (s *MigrationService) ImportProgress(owner, repo string) (*Import, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeImportPreview)
+
+ out := new(Import)
+ resp, err := s.client.Do(req, out)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return out, resp, err
+}
+
+// UpdateImport updates an existing repository import.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#update-existing-import
+func (s *MigrationService) UpdateImport(owner, repo string, in *Import) (*Import, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
+ req, err := s.client.NewRequest("PATCH", u, in)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeImportPreview)
+
+ out := new(Import)
+ resp, err := s.client.Do(req, out)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return out, resp, err
+}
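+
+// Usage sketch (editorial example, not part of the upstream file; it assumes a
+// configured *Client named "client" that exposes this service as
+// client.Migrations, and that Import has the *string VCSUsername/VCSPassword
+// fields referenced in the status documentation above):
+//
+//	imp, _, err := client.Migrations.ImportProgress("octocat", "hello-world")
+//	if err != nil {
+//		// handle error
+//	}
+//	if imp.Status != nil && *imp.Status == "detection_needs_auth" {
+//		// The importer needs credentials for the originating repository.
+//		imp, _, err = client.Migrations.UpdateImport("octocat", "hello-world", &Import{
+//			VCSUsername: String("monalisa"),
+//			VCSPassword: String("secret"),
+//		})
+//	}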
+
+// CommitAuthors gets the authors mapped from the original repository.
+//
+// Each type of source control system represents authors in a different way.
+// For example, a Git commit author has a display name and an email address,
+// but a Subversion commit author just has a username. The GitHub Importer will
+// make the author information valid, but the author might not be correct. For
+// example, it will change the bare Subversion username "hubot" into something
+// like "hubot <hubot@12341234-abab-fefe-8787-fedcba654321>".
+//
+// This method and MapCommitAuthor allow you to provide correct Git author
+// information.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-commit-authors
+func (s *MigrationService) CommitAuthors(owner, repo string) ([]*SourceImportAuthor, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/import/authors", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeImportPreview)
+
+ authors := new([]*SourceImportAuthor)
+ resp, err := s.client.Do(req, authors)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *authors, resp, err
+}
+
+// MapCommitAuthor updates an author's identity for the import. Your
+// application can continue updating authors any time before you push new
+// commits to the repository.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#map-a-commit-author
+func (s *MigrationService) MapCommitAuthor(owner, repo string, id int, author *SourceImportAuthor) (*SourceImportAuthor, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/import/authors/%v", owner, repo, id)
+ req, err := s.client.NewRequest("PATCH", u, author)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeImportPreview)
+
+ out := new(SourceImportAuthor)
+ resp, err := s.client.Do(req, out)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return out, resp, err
+}
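+
+// Usage sketch (editorial example, not part of the upstream file; assumes a
+// configured *Client named "client" exposing this service as client.Migrations):
+//
+//	authors, _, err := client.Migrations.CommitAuthors("octocat", "hello-world")
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, a := range authors {
+//		if a.RemoteName != nil && *a.RemoteName == "hubot" {
+//			// Replace the placeholder identity generated by the importer.
+//			_, _, err = client.Migrations.MapCommitAuthor("octocat", "hello-world", *a.ID,
+//				&SourceImportAuthor{Name: String("Hubot"), Email: String("hubot@example.com")})
+//		}
+//	}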
+
+// SetLFSPreference sets whether imported repositories should use Git LFS for
+// files larger than 100MB. Only the UseLFS field on the provided Import is
+// used.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#set-git-lfs-preference
+func (s *MigrationService) SetLFSPreference(owner, repo string, in *Import) (*Import, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/import/lfs", owner, repo)
+ req, err := s.client.NewRequest("PATCH", u, in)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeImportPreview)
+
+ out := new(Import)
+ resp, err := s.client.Do(req, out)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return out, resp, err
+}
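+
+// Usage sketch (editorial example, not part of the upstream file; assumes
+// client.Migrations as above and that UseLFS is a *string field accepting the
+// GitHub API values "opt_in"/"opt_out" — the exact values are an assumption):
+//
+//	_, _, err := client.Migrations.SetLFSPreference("octocat", "hello-world",
+//		&Import{UseLFS: String("opt_in")})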
+
+// LargeFiles lists files larger than 100MB found during the import.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-large-files
+func (s *MigrationService) LargeFiles(owner, repo string) ([]*LargeFile, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/import/large_files", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeImportPreview)
+
+ files := new([]*LargeFile)
+ resp, err := s.client.Do(req, files)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *files, resp, err
+}
+
+// CancelImport stops an import for a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#cancel-an-import
+func (s *MigrationService) CancelImport(owner, repo string) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeImportPreview)
+
+ return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/google/go-github/github/misc.go b/vendor/github.com/google/go-github/github/misc.go
new file mode 100644
index 0000000..8576a4c
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/misc.go
@@ -0,0 +1,197 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "bytes"
+ "fmt"
+ "net/url"
+)
+
+// MarkdownOptions specifies optional parameters to the Markdown method.
+type MarkdownOptions struct {
+ // Mode identifies the rendering mode. Possible values are:
+ // markdown - render a document as plain Markdown, just like
+ // README files are rendered.
+ //
+ // gfm - render a document as user-content, e.g. like user
+ // comments or issues are rendered. In GFM mode, hard line breaks are
+ // always taken into account, and issue and user mentions are linked
+ // accordingly.
+ //
+ // Default is "markdown".
+ Mode string
+
+ // Context identifies the repository context. Only taken into account
+ // when rendering as "gfm".
+ Context string
+}
+
+type markdownRequest struct {
+ Text *string `json:"text,omitempty"`
+ Mode *string `json:"mode,omitempty"`
+ Context *string `json:"context,omitempty"`
+}
+
+// Markdown renders an arbitrary Markdown document.
+//
+// GitHub API docs: https://developer.github.com/v3/markdown/
+func (c *Client) Markdown(text string, opt *MarkdownOptions) (string, *Response, error) {
+ request := &markdownRequest{Text: String(text)}
+ if opt != nil {
+ if opt.Mode != "" {
+ request.Mode = String(opt.Mode)
+ }
+ if opt.Context != "" {
+ request.Context = String(opt.Context)
+ }
+ }
+
+ req, err := c.NewRequest("POST", "markdown", request)
+ if err != nil {
+ return "", nil, err
+ }
+
+ buf := new(bytes.Buffer)
+ resp, err := c.Do(req, buf)
+ if err != nil {
+ return "", resp, err
+ }
+
+ return buf.String(), resp, nil
+}
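+
+// Usage sketch (editorial example, not part of the upstream file; assumes a
+// configured *Client named "client"):
+//
+//	rendered, _, err := client.Markdown("Fixes #1 :tada:", &MarkdownOptions{
+//		Mode:    "gfm",
+//		Context: "google/go-github", // repository context; only used in "gfm" mode
+//	})
+//	if err == nil {
+//		fmt.Println(rendered)
+//	}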
+
+// ListEmojis returns the emojis available to use on GitHub.
+//
+// GitHub API docs: https://developer.github.com/v3/emojis/
+func (c *Client) ListEmojis() (map[string]string, *Response, error) {
+ req, err := c.NewRequest("GET", "emojis", nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var emoji map[string]string
+ resp, err := c.Do(req, &emoji)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return emoji, resp, nil
+}
+
+// APIMeta represents metadata about the GitHub API.
+type APIMeta struct {
+ // An Array of IP addresses in CIDR format specifying the addresses
+ // that incoming service hooks will originate from on GitHub.com.
+ Hooks []string `json:"hooks,omitempty"`
+
+ // An Array of IP addresses in CIDR format specifying the Git servers
+ // for GitHub.com.
+ Git []string `json:"git,omitempty"`
+
+ // Whether authentication with username and password is supported.
+ // (GitHub Enterprise instances using CAS or OAuth for authentication
+ // will return false. Features like Basic Authentication with a
+ // username and password, sudo mode, and two-factor authentication are
+ // not supported on these servers.)
+ VerifiablePasswordAuthentication *bool `json:"verifiable_password_authentication,omitempty"`
+
+ // An array of IP addresses in CIDR format specifying the addresses
+ // which serve GitHub Pages websites.
+ Pages []string `json:"pages,omitempty"`
+}
+
+// APIMeta returns information about GitHub.com, the service. Or, if you access
+// this endpoint on your organization’s GitHub Enterprise installation, this
+// endpoint provides information about that installation.
+//
+// GitHub API docs: https://developer.github.com/v3/meta/
+func (c *Client) APIMeta() (*APIMeta, *Response, error) {
+ req, err := c.NewRequest("GET", "meta", nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ meta := new(APIMeta)
+ resp, err := c.Do(req, meta)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return meta, resp, nil
+}
+
+// Octocat returns an ASCII art octocat with the specified message in a speech
+// bubble. If message is empty, a random zen phrase is used.
+func (c *Client) Octocat(message string) (string, *Response, error) {
+ u := "octocat"
+ if message != "" {
+ u = fmt.Sprintf("%s?s=%s", u, url.QueryEscape(message))
+ }
+
+ req, err := c.NewRequest("GET", u, nil)
+ if err != nil {
+ return "", nil, err
+ }
+
+ buf := new(bytes.Buffer)
+ resp, err := c.Do(req, buf)
+ if err != nil {
+ return "", resp, err
+ }
+
+ return buf.String(), resp, nil
+}
+
+// Zen returns a random line from The Zen of GitHub.
+//
+// see also: http://warpspire.com/posts/taste/
+func (c *Client) Zen() (string, *Response, error) {
+ req, err := c.NewRequest("GET", "zen", nil)
+ if err != nil {
+ return "", nil, err
+ }
+
+ buf := new(bytes.Buffer)
+ resp, err := c.Do(req, buf)
+ if err != nil {
+ return "", resp, err
+ }
+
+ return buf.String(), resp, nil
+}
+
+// ServiceHook represents a hook that has configuration settings, a list of
+// available events, and default events.
+type ServiceHook struct {
+ Name *string `json:"name,omitempty"`
+ Events []string `json:"events,omitempty"`
+ SupportedEvents []string `json:"supported_events,omitempty"`
+ Schema [][]string `json:"schema,omitempty"`
+}
+
+func (s *ServiceHook) String() string {
+ return Stringify(s)
+}
+
+// ListServiceHooks lists all of the available service hooks.
+//
+// GitHub API docs: https://developer.github.com/webhooks/#services
+func (c *Client) ListServiceHooks() ([]*ServiceHook, *Response, error) {
+ u := "hooks"
+ req, err := c.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ hooks := new([]*ServiceHook)
+ resp, err := c.Do(req, hooks)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *hooks, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/orgs.go b/vendor/github.com/google/go-github/github/orgs.go
new file mode 100644
index 0000000..d137e3e
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/orgs.go
@@ -0,0 +1,173 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// OrganizationsService provides access to the organization related functions
+// in the GitHub API.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/
+type OrganizationsService service
+
+// Organization represents a GitHub organization account.
+type Organization struct {
+ Login *string `json:"login,omitempty"`
+ ID *int `json:"id,omitempty"`
+ AvatarURL *string `json:"avatar_url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Company *string `json:"company,omitempty"`
+ Blog *string `json:"blog,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Email *string `json:"email,omitempty"`
+ Description *string `json:"description,omitempty"`
+ PublicRepos *int `json:"public_repos,omitempty"`
+ PublicGists *int `json:"public_gists,omitempty"`
+ Followers *int `json:"followers,omitempty"`
+ Following *int `json:"following,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+ TotalPrivateRepos *int `json:"total_private_repos,omitempty"`
+ OwnedPrivateRepos *int `json:"owned_private_repos,omitempty"`
+ PrivateGists *int `json:"private_gists,omitempty"`
+ DiskUsage *int `json:"disk_usage,omitempty"`
+ Collaborators *int `json:"collaborators,omitempty"`
+ BillingEmail *string `json:"billing_email,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Plan *Plan `json:"plan,omitempty"`
+
+ // API URLs
+ URL *string `json:"url,omitempty"`
+ EventsURL *string `json:"events_url,omitempty"`
+ HooksURL *string `json:"hooks_url,omitempty"`
+ IssuesURL *string `json:"issues_url,omitempty"`
+ MembersURL *string `json:"members_url,omitempty"`
+ PublicMembersURL *string `json:"public_members_url,omitempty"`
+ ReposURL *string `json:"repos_url,omitempty"`
+}
+
+func (o Organization) String() string {
+ return Stringify(o)
+}
+
+// Plan represents the payment plan for an account. See plans at https://github.com/plans.
+type Plan struct {
+ Name *string `json:"name,omitempty"`
+ Space *int `json:"space,omitempty"`
+ Collaborators *int `json:"collaborators,omitempty"`
+ PrivateRepos *int `json:"private_repos,omitempty"`
+}
+
+func (p Plan) String() string {
+ return Stringify(p)
+}
+
+// OrganizationsListOptions specifies the optional parameters to the
+// OrganizationsService.ListAll method.
+type OrganizationsListOptions struct {
+ // Since filters Organizations by ID.
+ Since int `url:"since,omitempty"`
+
+ ListOptions
+}
+
+// ListAll lists all organizations, in the order that they were created on GitHub.
+//
+// Note: Pagination is powered exclusively by the since parameter. To continue
+// listing the next set of organizations, use the ID of the last-returned organization
+// as the opts.Since parameter for the next call.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/#list-all-organizations
+func (s *OrganizationsService) ListAll(opt *OrganizationsListOptions) ([]*Organization, *Response, error) {
+ u, err := addOptions("organizations", opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ orgs := []*Organization{}
+ resp, err := s.client.Do(req, &orgs)
+ if err != nil {
+ return nil, resp, err
+ }
+ return orgs, resp, err
+}
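+
+// Usage sketch (editorial example, not part of the upstream file; assumes a
+// configured *Client named "client" exposing this service as
+// client.Organizations). It follows the since-based pagination described above:
+//
+//	opt := &OrganizationsListOptions{}
+//	for {
+//		orgs, _, err := client.Organizations.ListAll(opt)
+//		if err != nil || len(orgs) == 0 {
+//			break
+//		}
+//		// process orgs ...
+//		opt.Since = *orgs[len(orgs)-1].ID // continue after the last-returned organization
+//	}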
+
+// List the organizations for a user. Passing the empty string will list
+// organizations for the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/#list-user-organizations
+func (s *OrganizationsService) List(user string, opt *ListOptions) ([]*Organization, *Response, error) {
+ var u string
+ if user != "" {
+ u = fmt.Sprintf("users/%v/orgs", user)
+ } else {
+ u = "user/orgs"
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ orgs := new([]*Organization)
+ resp, err := s.client.Do(req, orgs)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *orgs, resp, err
+}
+
+// Get fetches an organization by name.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/#get-an-organization
+func (s *OrganizationsService) Get(org string) (*Organization, *Response, error) {
+ u := fmt.Sprintf("orgs/%v", org)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ organization := new(Organization)
+ resp, err := s.client.Do(req, organization)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return organization, resp, err
+}
+
+// Edit an organization.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/#edit-an-organization
+func (s *OrganizationsService) Edit(name string, org *Organization) (*Organization, *Response, error) {
+ u := fmt.Sprintf("orgs/%v", name)
+ req, err := s.client.NewRequest("PATCH", u, org)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ o := new(Organization)
+ resp, err := s.client.Do(req, o)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return o, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/orgs_hooks.go b/vendor/github.com/google/go-github/github/orgs_hooks.go
new file mode 100644
index 0000000..95b8322
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/orgs_hooks.go
@@ -0,0 +1,104 @@
+// Copyright 2015 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// ListHooks lists all Hooks for the specified organization.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#list-hooks
+func (s *OrganizationsService) ListHooks(org string, opt *ListOptions) ([]*Hook, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/hooks", org)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ hooks := new([]*Hook)
+ resp, err := s.client.Do(req, hooks)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *hooks, resp, err
+}
+
+// GetHook returns a single specified Hook.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#get-single-hook
+func (s *OrganizationsService) GetHook(org string, id int) (*Hook, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/hooks/%d", org, id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ hook := new(Hook)
+ resp, err := s.client.Do(req, hook)
+ return hook, resp, err
+}
+
+// CreateHook creates a Hook for the specified org.
+// Name and Config are required fields.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#create-a-hook
+func (s *OrganizationsService) CreateHook(org string, hook *Hook) (*Hook, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/hooks", org)
+ req, err := s.client.NewRequest("POST", u, hook)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ h := new(Hook)
+ resp, err := s.client.Do(req, h)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return h, resp, err
+}
+
+// EditHook updates a specified Hook.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#edit-a-hook
+func (s *OrganizationsService) EditHook(org string, id int, hook *Hook) (*Hook, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/hooks/%d", org, id)
+ req, err := s.client.NewRequest("PATCH", u, hook)
+ if err != nil {
+ return nil, nil, err
+ }
+ h := new(Hook)
+ resp, err := s.client.Do(req, h)
+ return h, resp, err
+}
+
+// PingHook triggers a 'ping' event to be sent to the Hook.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#ping-a-hook
+func (s *OrganizationsService) PingHook(org string, id int) (*Response, error) {
+ u := fmt.Sprintf("orgs/%v/hooks/%d/pings", org, id)
+ req, err := s.client.NewRequest("POST", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
+
+// DeleteHook deletes a specified Hook.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#delete-a-hook
+func (s *OrganizationsService) DeleteHook(org string, id int) (*Response, error) {
+ u := fmt.Sprintf("orgs/%v/hooks/%d", org, id)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/google/go-github/github/orgs_members.go b/vendor/github.com/google/go-github/github/orgs_members.go
new file mode 100644
index 0000000..80454ad
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/orgs_members.go
@@ -0,0 +1,272 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// Membership represents the status of a user's membership in an organization or team.
+type Membership struct {
+ URL *string `json:"url,omitempty"`
+
+ // State is the user's status within the organization or team.
+ // Possible values are: "active", "pending"
+ State *string `json:"state,omitempty"`
+
+ // Role identifies the user's role within the organization or team.
+ // Possible values for organization membership:
+ // member - non-owner organization member
+ // admin - organization owner
+ //
+ // Possible values for team membership are:
+ // member - a normal member of the team
+ // maintainer - a team maintainer. Able to add/remove other team
+ // members, promote other team members to team
+ // maintainer, and edit the team’s name and description
+ Role *string `json:"role,omitempty"`
+
+ // For organization membership, the API URL of the organization.
+ OrganizationURL *string `json:"organization_url,omitempty"`
+
+ // For organization membership, the organization the membership is for.
+ Organization *Organization `json:"organization,omitempty"`
+
+ // For organization membership, the user the membership is for.
+ User *User `json:"user,omitempty"`
+}
+
+func (m Membership) String() string {
+ return Stringify(m)
+}
+
+// ListMembersOptions specifies optional parameters to the
+// OrganizationsService.ListMembers method.
+type ListMembersOptions struct {
+ // If true (or if the authenticated user is not an owner of the
+ // organization), list only publicly visible members.
+ PublicOnly bool `url:"-"`
+
+ // Filter members returned in the list. Possible values are:
+ // 2fa_disabled, all. Default is "all".
+ Filter string `url:"filter,omitempty"`
+
+ // Role filters members returned by their role in the organization.
+ // Possible values are:
+ // all - all members of the organization, regardless of role
+ // admin - organization owners
+ // member - non-owner members of the organization
+ //
+ // Default is "all".
+ Role string `url:"role,omitempty"`
+
+ ListOptions
+}
+
+// ListMembers lists the members for an organization. If the authenticated
+// user is an owner of the organization, this will return both concealed and
+// public members, otherwise it will only return public members.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/members/#members-list
+func (s *OrganizationsService) ListMembers(org string, opt *ListMembersOptions) ([]*User, *Response, error) {
+ var u string
+ if opt != nil && opt.PublicOnly {
+ u = fmt.Sprintf("orgs/%v/public_members", org)
+ } else {
+ u = fmt.Sprintf("orgs/%v/members", org)
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ members := new([]*User)
+ resp, err := s.client.Do(req, members)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *members, resp, err
+}
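+
+// Usage sketch (editorial example, not part of the upstream file; assumes
+// client.Organizations as above). Role "admin" restricts the listing to
+// organization owners, per the option documentation:
+//
+//	owners, _, err := client.Organizations.ListMembers("octo-org",
+//		&ListMembersOptions{Role: "admin"})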
+
+// IsMember checks if a user is a member of an organization.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/members/#check-membership
+func (s *OrganizationsService) IsMember(org, user string) (bool, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/members/%v", org, user)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return false, nil, err
+ }
+
+ resp, err := s.client.Do(req, nil)
+ member, err := parseBoolResponse(err)
+ return member, resp, err
+}
+
+// IsPublicMember checks if a user is a public member of an organization.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/members/#check-public-membership
+func (s *OrganizationsService) IsPublicMember(org, user string) (bool, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/public_members/%v", org, user)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return false, nil, err
+ }
+
+ resp, err := s.client.Do(req, nil)
+ member, err := parseBoolResponse(err)
+ return member, resp, err
+}
+
+// RemoveMember removes a user from all teams of an organization.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/members/#remove-a-member
+func (s *OrganizationsService) RemoveMember(org, user string) (*Response, error) {
+ u := fmt.Sprintf("orgs/%v/members/%v", org, user)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// PublicizeMembership publicizes a user's membership in an organization. (A
+// user cannot publicize the membership for another user.)
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/members/#publicize-a-users-membership
+func (s *OrganizationsService) PublicizeMembership(org, user string) (*Response, error) {
+ u := fmt.Sprintf("orgs/%v/public_members/%v", org, user)
+ req, err := s.client.NewRequest("PUT", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// ConcealMembership conceals a user's membership in an organization.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/members/#conceal-a-users-membership
+func (s *OrganizationsService) ConcealMembership(org, user string) (*Response, error) {
+ u := fmt.Sprintf("orgs/%v/public_members/%v", org, user)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// ListOrgMembershipsOptions specifies optional parameters to the
+// OrganizationsService.ListOrgMemberships method.
+type ListOrgMembershipsOptions struct {
+ // Filter memberships to include only those with the specified state.
+ // Possible values are: "active", "pending".
+ State string `url:"state,omitempty"`
+
+ ListOptions
+}
+
+// ListOrgMemberships lists the organization memberships for the authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/members/#list-your-organization-memberships
+func (s *OrganizationsService) ListOrgMemberships(opt *ListOrgMembershipsOptions) ([]*Membership, *Response, error) {
+ u := "user/memberships/orgs"
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var memberships []*Membership
+ resp, err := s.client.Do(req, &memberships)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return memberships, resp, err
+}
+
+// GetOrgMembership gets the membership for a user in a specified organization.
+// Passing an empty string for user will get the membership for the
+// authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/members/#get-organization-membership
+// GitHub API docs: https://developer.github.com/v3/orgs/members/#get-your-organization-membership
+func (s *OrganizationsService) GetOrgMembership(user, org string) (*Membership, *Response, error) {
+ var u string
+ if user != "" {
+ u = fmt.Sprintf("orgs/%v/memberships/%v", org, user)
+ } else {
+ u = fmt.Sprintf("user/memberships/orgs/%v", org)
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ membership := new(Membership)
+ resp, err := s.client.Do(req, membership)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return membership, resp, err
+}
+
+// EditOrgMembership edits the membership for a user in the specified organization.
+// Passing an empty string for user will edit the membership for the
+// authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/members/#add-or-update-organization-membership
+// GitHub API docs: https://developer.github.com/v3/orgs/members/#edit-your-organization-membership
+func (s *OrganizationsService) EditOrgMembership(user, org string, membership *Membership) (*Membership, *Response, error) {
+ var u, method string
+ if user != "" {
+ u = fmt.Sprintf("orgs/%v/memberships/%v", org, user)
+ method = "PUT"
+ } else {
+ u = fmt.Sprintf("user/memberships/orgs/%v", org)
+ method = "PATCH"
+ }
+
+ req, err := s.client.NewRequest(method, u, membership)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ m := new(Membership)
+ resp, err := s.client.Do(req, m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, err
+}
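+
+// Usage sketch (editorial example, not part of the upstream file; assumes
+// client.Organizations as above). Passing an empty user edits the
+// authenticated user's own membership, e.g. to accept a pending invitation by
+// setting the state to "active":
+//
+//	m, _, err := client.Organizations.EditOrgMembership("", "octo-org",
+//		&Membership{State: String("active")})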
+
+// RemoveOrgMembership removes a user from the specified organization. If the
+// user has been invited to the organization, this will cancel their invitation.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/members/#remove-organization-membership
+func (s *OrganizationsService) RemoveOrgMembership(user, org string) (*Response, error) {
+ u := fmt.Sprintf("orgs/%v/memberships/%v", org, user)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/google/go-github/github/orgs_teams.go b/vendor/github.com/google/go-github/github/orgs_teams.go
new file mode 100644
index 0000000..6afcd1f
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/orgs_teams.go
@@ -0,0 +1,424 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// Team represents a team within a GitHub organization. Teams are used to
+// manage access to an organization's repositories.
+type Team struct {
+ ID *int `json:"id,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Description *string `json:"description,omitempty"`
+ URL *string `json:"url,omitempty"`
+ Slug *string `json:"slug,omitempty"`
+
+ // Permission is deprecated when creating or editing a team in an org
+ // using the new GitHub permission model. It no longer identifies the
+ // permission a team has on its repos, but only specifies the default
+ // permission a repo is initially added with. Avoid confusion by
+ // specifying a permission value when calling AddTeamRepo.
+ Permission *string `json:"permission,omitempty"`
+
+ // Privacy identifies the level of privacy this team should have.
+ // Possible values are:
+ // secret - only visible to organization owners and members of this team
+ // closed - visible to all members of this organization
+ // Default is "secret".
+ Privacy *string `json:"privacy,omitempty"`
+
+ MembersCount *int `json:"members_count,omitempty"`
+ ReposCount *int `json:"repos_count,omitempty"`
+ Organization *Organization `json:"organization,omitempty"`
+ MembersURL *string `json:"members_url,omitempty"`
+ RepositoriesURL *string `json:"repositories_url,omitempty"`
+}
+
+func (t Team) String() string {
+ return Stringify(t)
+}
+
+// Invitation represents a team member's invitation status.
+type Invitation struct {
+ ID *int `json:"id,omitempty"`
+ Login *string `json:"login,omitempty"`
+ Email *string `json:"email,omitempty"`
+ Role *string `json:"role,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+}
+
+func (i Invitation) String() string {
+ return Stringify(i)
+}
+
+// ListTeams lists all of the teams for an organization.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/teams/#list-teams
+func (s *OrganizationsService) ListTeams(org string, opt *ListOptions) ([]*Team, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/teams", org)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ teams := new([]*Team)
+ resp, err := s.client.Do(req, teams)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *teams, resp, err
+}
+
+// GetTeam fetches a team by ID.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/teams/#get-team
+func (s *OrganizationsService) GetTeam(team int) (*Team, *Response, error) {
+ u := fmt.Sprintf("teams/%v", team)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t := new(Team)
+ resp, err := s.client.Do(req, t)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return t, resp, err
+}
+
+// CreateTeam creates a new team within an organization.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/teams/#create-team
+func (s *OrganizationsService) CreateTeam(org string, team *Team) (*Team, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/teams", org)
+ req, err := s.client.NewRequest("POST", u, team)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t := new(Team)
+ resp, err := s.client.Do(req, t)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return t, resp, err
+}
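+
+// Usage sketch (editorial example, not part of the upstream file; assumes
+// client.Organizations as above). Privacy "closed" makes the team visible to
+// all organization members, per the Team documentation:
+//
+//	team, _, err := client.Organizations.CreateTeam("octo-org", &Team{
+//		Name:        String("justice-league"),
+//		Description: String("A team of superheroes"),
+//		Privacy:     String("closed"),
+//	})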
+
+// EditTeam edits a team.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/teams/#edit-team
+func (s *OrganizationsService) EditTeam(id int, team *Team) (*Team, *Response, error) {
+ u := fmt.Sprintf("teams/%v", id)
+ req, err := s.client.NewRequest("PATCH", u, team)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t := new(Team)
+ resp, err := s.client.Do(req, t)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return t, resp, err
+}
+
+// DeleteTeam deletes a team.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/teams/#delete-team
+func (s *OrganizationsService) DeleteTeam(team int) (*Response, error) {
+ u := fmt.Sprintf("teams/%v", team)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// OrganizationListTeamMembersOptions specifies the optional parameters to the
+// OrganizationsService.ListTeamMembers method.
+type OrganizationListTeamMembersOptions struct {
+ // Role filters members returned by their role in the team. Possible
+ // values are "all", "member", "maintainer". Default is "all".
+ Role string `url:"role,omitempty"`
+
+ ListOptions
+}
+
+// ListTeamMembers lists all of the users who are members of the specified
+// team.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/teams/#list-team-members
+func (s *OrganizationsService) ListTeamMembers(team int, opt *OrganizationListTeamMembersOptions) ([]*User, *Response, error) {
+ u := fmt.Sprintf("teams/%v/members", team)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ members := new([]*User)
+ resp, err := s.client.Do(req, members)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *members, resp, err
+}
+
+// IsTeamMember checks if a user is a member of the specified team.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/teams/#get-team-member
+func (s *OrganizationsService) IsTeamMember(team int, user string) (bool, *Response, error) {
+ u := fmt.Sprintf("teams/%v/members/%v", team, user)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return false, nil, err
+ }
+
+ resp, err := s.client.Do(req, nil)
+ member, err := parseBoolResponse(err)
+ return member, resp, err
+}
+
+// ListTeamRepos lists the repositories that the specified team has access to.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/teams/#list-team-repos
+func (s *OrganizationsService) ListTeamRepos(team int, opt *ListOptions) ([]*Repository, *Response, error) {
+ u := fmt.Sprintf("teams/%v/repos", team)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ repos := new([]*Repository)
+ resp, err := s.client.Do(req, repos)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *repos, resp, err
+}
+
+// IsTeamRepo checks if a team manages the specified repository. If the
+// repository is managed by the team, a Repository is returned which includes
+// the permissions the team has for that repo.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/teams/#check-if-a-team-manages-a-repository
+func (s *OrganizationsService) IsTeamRepo(team int, owner string, repo string) (*Repository, *Response, error) {
+ u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req.Header.Set("Accept", mediaTypeOrgPermissionRepo)
+
+ repository := new(Repository)
+ resp, err := s.client.Do(req, repository)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return repository, resp, err
+}
+
+// OrganizationAddTeamRepoOptions specifies the optional parameters to the
+// OrganizationsService.AddTeamRepo method.
+type OrganizationAddTeamRepoOptions struct {
+ // Permission specifies the permission to grant the team on this repository.
+ // Possible values are:
+ // pull - team members can pull, but not push to or administer this repository
+ // push - team members can pull and push, but not administer this repository
+ // admin - team members can pull, push and administer this repository
+ //
+ // If not specified, the team's permission attribute will be used.
+ Permission string `json:"permission,omitempty"`
+}
+
+// AddTeamRepo adds a repository to be managed by the specified team. The
+// specified repository must be owned by the organization to which the team
+// belongs, or a direct fork of a repository owned by the organization.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/teams/#add-team-repo
+func (s *OrganizationsService) AddTeamRepo(team int, owner string, repo string, opt *OrganizationAddTeamRepoOptions) (*Response, error) {
+ u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo)
+ req, err := s.client.NewRequest("PUT", u, opt)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
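+
+// Usage sketch (editorial example, not part of the upstream file; assumes
+// client.Organizations as above and a team ID of 42). The Permission option
+// overrides the team's default permission for this repository:
+//
+//	_, err := client.Organizations.AddTeamRepo(42, "octo-org", "hello-world",
+//		&OrganizationAddTeamRepoOptions{Permission: "push"})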
+
+// RemoveTeamRepo removes a repository from being managed by the specified
+// team. Note that this does not delete the repository, it just removes it
+// from the team.
+//
+// GitHub API docs: http://developer.github.com/v3/orgs/teams/#remove-team-repo
+func (s *OrganizationsService) RemoveTeamRepo(team int, owner string, repo string) (*Response, error) {
+ u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// ListUserTeams lists a user's teams.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/teams/#list-user-teams
+func (s *OrganizationsService) ListUserTeams(opt *ListOptions) ([]*Team, *Response, error) {
+ u := "user/teams"
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ teams := new([]*Team)
+ resp, err := s.client.Do(req, teams)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *teams, resp, err
+}
+
+// GetTeamMembership returns the membership status for a user in a team.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/teams/#get-team-membership
+func (s *OrganizationsService) GetTeamMembership(team int, user string) (*Membership, *Response, error) {
+ u := fmt.Sprintf("teams/%v/memberships/%v", team, user)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t := new(Membership)
+ resp, err := s.client.Do(req, t)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return t, resp, err
+}
+
+// OrganizationAddTeamMembershipOptions specifies the optional
+// parameters to the OrganizationsService.AddTeamMembership method.
+type OrganizationAddTeamMembershipOptions struct {
+ // Role specifies the role the user should have in the team. Possible
+ // values are:
+ // member - a normal member of the team
+ // maintainer - a team maintainer. Able to add/remove other team
+ // members, promote other team members to team
+ // maintainer, and edit the team’s name and description
+ //
+ // Default value is "member".
+ Role string `json:"role,omitempty"`
+}
+
+// AddTeamMembership adds or invites a user to a team.
+//
+// In order to add a membership between a user and a team, the authenticated
+// user must have 'admin' permissions to the team or be an owner of the
+// organization that the team is associated with.
+//
+// If the user is already a part of the team's organization (meaning they're on
+// at least one other team in the organization), this endpoint will add the
+// user to the team.
+//
+// If the user is completely unaffiliated with the team's organization (meaning
+// they're on none of the organization's teams), this endpoint will send an
+// invitation to the user via email. This newly-created membership will be in
+// the "pending" state until the user accepts the invitation, at which point
+// the membership will transition to the "active" state and the user will be
+// added as a member of the team.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/teams/#add-team-membership
+func (s *OrganizationsService) AddTeamMembership(team int, user string, opt *OrganizationAddTeamMembershipOptions) (*Membership, *Response, error) {
+ u := fmt.Sprintf("teams/%v/memberships/%v", team, user)
+ req, err := s.client.NewRequest("PUT", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t := new(Membership)
+ resp, err := s.client.Do(req, t)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return t, resp, err
+}
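+
+// Usage sketch (editorial example, not part of the upstream file; assumes
+// client.Organizations as above and a team ID of 42). A membership returned in
+// the "pending" state means the user was invited rather than added directly:
+//
+//	m, _, err := client.Organizations.AddTeamMembership(42, "octocat",
+//		&OrganizationAddTeamMembershipOptions{Role: "maintainer"})
+//	if err == nil && m.State != nil && *m.State == "pending" {
+//		// the user still has to accept the emailed invitation
+//	}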
+
+// RemoveTeamMembership removes a user from a team.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/teams/#remove-team-membership
+func (s *OrganizationsService) RemoveTeamMembership(team int, user string) (*Response, error) {
+ u := fmt.Sprintf("teams/%v/memberships/%v", team, user)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// ListPendingTeamInvitations gets the pending invitations for a team.
+// Warning: The API may change without advance notice during the preview period.
+// Preview features are not supported for production use.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/teams/#list-pending-team-invitations
+func (s *OrganizationsService) ListPendingTeamInvitations(team int, opt *ListOptions) ([]*Invitation, *Response, error) {
+ u := fmt.Sprintf("teams/%v/invitations", team)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeOrgMembershipPreview)
+
+ pendingInvitations := new([]*Invitation)
+ resp, err := s.client.Do(req, pendingInvitations)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *pendingInvitations, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/projects.go b/vendor/github.com/google/go-github/github/projects.go
new file mode 100644
index 0000000..2330056
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/projects.go
@@ -0,0 +1,417 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// ProjectsService provides access to the projects functions in the
+// GitHub API.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/
+type ProjectsService service
+
+// Project represents a GitHub Project.
+type Project struct {
+ ID *int `json:"id,omitempty"`
+ URL *string `json:"url,omitempty"`
+ OwnerURL *string `json:"owner_url,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Body *string `json:"body,omitempty"`
+ Number *int `json:"number,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+
+ // The User object that generated the project.
+ Creator *User `json:"creator,omitempty"`
+}
+
+func (p Project) String() string {
+ return Stringify(p)
+}
+
+// GetProject gets a GitHub Project for a repo.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/#get-a-project
+func (s *ProjectsService) GetProject(id int) (*Project, *Response, error) {
+ u := fmt.Sprintf("projects/%v", id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ project := &Project{}
+ resp, err := s.client.Do(req, project)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return project, resp, err
+}
+
+// ProjectOptions specifies the parameters to the
+// RepositoriesService.CreateProject and
+// ProjectsService.UpdateProject methods.
+type ProjectOptions struct {
+ // The name of the project. (Required for creation; optional for update.)
+ Name string `json:"name,omitempty"`
+ // The body of the project. (Optional.)
+ Body string `json:"body,omitempty"`
+}
+
+// UpdateProject updates a repository project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/#update-a-project
+func (s *ProjectsService) UpdateProject(id int, opt *ProjectOptions) (*Project, *Response, error) {
+ u := fmt.Sprintf("projects/%v", id)
+ req, err := s.client.NewRequest("PATCH", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ project := &Project{}
+ resp, err := s.client.Do(req, project)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return project, resp, err
+}
+
+// DeleteProject deletes a GitHub Project from a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/#delete-a-project
+func (s *ProjectsService) DeleteProject(id int) (*Response, error) {
+ u := fmt.Sprintf("projects/%v", id)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ return s.client.Do(req, nil)
+}
+
+// ProjectColumn represents a column of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/projects/
+type ProjectColumn struct {
+ ID *int `json:"id,omitempty"`
+ Name *string `json:"name,omitempty"`
+ ProjectURL *string `json:"project_url,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+}
+
+// ListProjectColumns lists the columns of a GitHub Project for a repo.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/columns/#list-project-columns
+func (s *ProjectsService) ListProjectColumns(projectID int, opt *ListOptions) ([]*ProjectColumn, *Response, error) {
+ u := fmt.Sprintf("projects/%v/columns", projectID)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ columns := []*ProjectColumn{}
+ resp, err := s.client.Do(req, &columns)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return columns, resp, err
+}
+
+// GetProjectColumn gets a column of a GitHub Project for a repo.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/columns/#get-a-project-column
+func (s *ProjectsService) GetProjectColumn(id int) (*ProjectColumn, *Response, error) {
+ u := fmt.Sprintf("projects/columns/%v", id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ column := &ProjectColumn{}
+ resp, err := s.client.Do(req, column)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return column, resp, err
+}
+
+// ProjectColumnOptions specifies the parameters to the
+// ProjectsService.CreateProjectColumn and
+// ProjectsService.UpdateProjectColumn methods.
+type ProjectColumnOptions struct {
+ // The name of the project column. (Required for creation and update.)
+ Name string `json:"name"`
+}
+
+// CreateProjectColumn creates a column for the specified (by ID) project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/columns/#create-a-project-column
+func (s *ProjectsService) CreateProjectColumn(projectID int, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) {
+ u := fmt.Sprintf("projects/%v/columns", projectID)
+ req, err := s.client.NewRequest("POST", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ column := &ProjectColumn{}
+ resp, err := s.client.Do(req, column)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return column, resp, err
+}
+
+// UpdateProjectColumn updates a column of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/columns/#update-a-project-column
+func (s *ProjectsService) UpdateProjectColumn(columnID int, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) {
+ u := fmt.Sprintf("projects/columns/%v", columnID)
+ req, err := s.client.NewRequest("PATCH", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ column := &ProjectColumn{}
+ resp, err := s.client.Do(req, column)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return column, resp, err
+}
+
+// DeleteProjectColumn deletes a column from a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/columns/#delete-a-project-column
+func (s *ProjectsService) DeleteProjectColumn(columnID int) (*Response, error) {
+ u := fmt.Sprintf("projects/columns/%v", columnID)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ return s.client.Do(req, nil)
+}
+
+// ProjectColumnMoveOptions specifies the parameters to the
+// ProjectsService.MoveProjectColumn method.
+type ProjectColumnMoveOptions struct {
+ // Position can be one of "first", "last", or "after:<column-id>", where
+ // <column-id> is the ID of a column in the same project. (Required.)
+ Position string `json:"position"`
+}
+
+// MoveProjectColumn moves a column within a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/columns/#move-a-project-column
+func (s *ProjectsService) MoveProjectColumn(columnID int, opt *ProjectColumnMoveOptions) (*Response, error) {
+ u := fmt.Sprintf("projects/columns/%v/moves", columnID)
+ req, err := s.client.NewRequest("POST", u, opt)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ return s.client.Do(req, nil)
+}
+
+// ProjectCard represents a card in a column of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/projects/
+type ProjectCard struct {
+ ColumnURL *string `json:"column_url,omitempty"`
+ ContentURL *string `json:"content_url,omitempty"`
+ ID *int `json:"id,omitempty"`
+ Note *string `json:"note,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+}
+
+// ListProjectCards lists the cards in a column of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/cards/#list-project-cards
+func (s *ProjectsService) ListProjectCards(columnID int, opt *ListOptions) ([]*ProjectCard, *Response, error) {
+ u := fmt.Sprintf("projects/columns/%v/cards", columnID)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ cards := []*ProjectCard{}
+ resp, err := s.client.Do(req, &cards)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return cards, resp, err
+}
+
+// GetProjectCard gets a card in a column of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/cards/#get-a-project-card
+func (s *ProjectsService) GetProjectCard(columnID int) (*ProjectCard, *Response, error) {
+ u := fmt.Sprintf("projects/columns/cards/%v", columnID)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ card := &ProjectCard{}
+ resp, err := s.client.Do(req, card)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return card, resp, err
+}
+
+// ProjectCardOptions specifies the parameters to the
+// ProjectsService.CreateProjectCard and
+// ProjectsService.UpdateProjectCard methods.
+type ProjectCardOptions struct {
+ // The note of the card. Note and ContentID are mutually exclusive.
+ Note string `json:"note,omitempty"`
+ // The ID (not Number) of the Issue or Pull Request to associate with this card.
+ // Note and ContentID are mutually exclusive.
+ ContentID int `json:"content_id,omitempty"`
+ // The type of content to associate with this card. Possible values are: "Issue", "PullRequest".
+ ContentType string `json:"content_type,omitempty"`
+}
+
+// CreateProjectCard creates a card in the specified column of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/cards/#create-a-project-card
+func (s *ProjectsService) CreateProjectCard(columnID int, opt *ProjectCardOptions) (*ProjectCard, *Response, error) {
+ u := fmt.Sprintf("projects/columns/%v/cards", columnID)
+ req, err := s.client.NewRequest("POST", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ card := &ProjectCard{}
+ resp, err := s.client.Do(req, card)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return card, resp, err
+}
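+
+// Usage sketch (editorial example, not part of the upstream file; assumes the
+// client exposes this service as client.Projects and a column ID of 123).
+// Note and ContentID are mutually exclusive, so a card is created either from
+// a free-form note or from an existing issue or pull request:
+//
+//	// card backed by a note
+//	card, _, err := client.Projects.CreateProjectCard(123, &ProjectCardOptions{Note: "Triage me"})
+//
+//	// card backed by an issue (ContentID is the issue's ID, not its number)
+//	card, _, err = client.Projects.CreateProjectCard(123, &ProjectCardOptions{
+//		ContentID:   456,
+//		ContentType: "Issue",
+//	})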
+
+// UpdateProjectCard updates a card of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/cards/#update-a-project-card
+func (s *ProjectsService) UpdateProjectCard(cardID int, opt *ProjectCardOptions) (*ProjectCard, *Response, error) {
+ u := fmt.Sprintf("projects/columns/cards/%v", cardID)
+ req, err := s.client.NewRequest("PATCH", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ card := &ProjectCard{}
+ resp, err := s.client.Do(req, card)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return card, resp, err
+}
+
+// DeleteProjectCard deletes a card from a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/cards/#delete-a-project-card
+func (s *ProjectsService) DeleteProjectCard(cardID int) (*Response, error) {
+ u := fmt.Sprintf("projects/columns/cards/%v", cardID)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ return s.client.Do(req, nil)
+}
+
+// ProjectCardMoveOptions specifies the parameters to the
+// ProjectsService.MoveProjectCard method.
+type ProjectCardMoveOptions struct {
+ // Position can be one of "top", "bottom", or "after:<card-id>", where
+ // <card-id> is the ID of a card in the same project.
+ Position string `json:"position"`
+ // ColumnID is the ID of a column in the same project. Note that ColumnID
+ // is required when using Position "after:<card-id>" when that card is in
+ // another column; otherwise it is optional.
+ ColumnID int `json:"column_id,omitempty"`
+}
+
+// MoveProjectCard moves a card within a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/cards/#move-a-project-card
+func (s *ProjectsService) MoveProjectCard(cardID int, opt *ProjectCardMoveOptions) (*Response, error) {
+ u := fmt.Sprintf("projects/columns/cards/%v/moves", cardID)
+ req, err := s.client.NewRequest("POST", u, opt)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ return s.client.Do(req, nil)
+}
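+
+// Usage sketch (editorial example, not part of the upstream file; assumes
+// client.Projects as above and card/column IDs of 456 and 789). Supplying
+// ColumnID moves the card into another column of the same project:
+//
+//	_, err := client.Projects.MoveProjectCard(456, &ProjectCardMoveOptions{
+//		Position: "top",
+//		ColumnID: 789,
+//	})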
diff --git a/vendor/github.com/google/go-github/github/pulls.go b/vendor/github.com/google/go-github/github/pulls.go
new file mode 100644
index 0000000..51c6ccb
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/pulls.go
@@ -0,0 +1,325 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+)
+
+// PullRequestsService handles communication with the pull request related
+// methods of the GitHub API.
+//
+// GitHub API docs: http://developer.github.com/v3/pulls/
+type PullRequestsService service
+
+// PullRequest represents a GitHub pull request on a repository.
+type PullRequest struct {
+ ID *int `json:"id,omitempty"`
+ Number *int `json:"number,omitempty"`
+ State *string `json:"state,omitempty"`
+ Title *string `json:"title,omitempty"`
+ Body *string `json:"body,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+ ClosedAt *time.Time `json:"closed_at,omitempty"`
+ MergedAt *time.Time `json:"merged_at,omitempty"`
+ User *User `json:"user,omitempty"`
+ Merged *bool `json:"merged,omitempty"`
+ Mergeable *bool `json:"mergeable,omitempty"`
+ MergedBy *User `json:"merged_by,omitempty"`
+ Comments *int `json:"comments,omitempty"`
+ Commits *int `json:"commits,omitempty"`
+ Additions *int `json:"additions,omitempty"`
+ Deletions *int `json:"deletions,omitempty"`
+ ChangedFiles *int `json:"changed_files,omitempty"`
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ IssueURL *string `json:"issue_url,omitempty"`
+ StatusesURL *string `json:"statuses_url,omitempty"`
+ DiffURL *string `json:"diff_url,omitempty"`
+ PatchURL *string `json:"patch_url,omitempty"`
+ ReviewCommentsURL *string `json:"review_comments_url,omitempty"`
+ ReviewCommentURL *string `json:"review_comment_url,omitempty"`
+ Assignee *User `json:"assignee,omitempty"`
+ Assignees []*User `json:"assignees,omitempty"`
+ Milestone *Milestone `json:"milestone,omitempty"`
+
+ Head *PullRequestBranch `json:"head,omitempty"`
+ Base *PullRequestBranch `json:"base,omitempty"`
+}
+
+func (p PullRequest) String() string {
+ return Stringify(p)
+}
+
+// PullRequestBranch represents a base or head branch in a GitHub pull request.
+type PullRequestBranch struct {
+ Label *string `json:"label,omitempty"`
+ Ref *string `json:"ref,omitempty"`
+ SHA *string `json:"sha,omitempty"`
+ Repo *Repository `json:"repo,omitempty"`
+ User *User `json:"user,omitempty"`
+}
+
+// PullRequestListOptions specifies the optional parameters to the
+// PullRequestsService.List method.
+type PullRequestListOptions struct {
+ // State filters pull requests based on their state. Possible values are:
+ // open, closed. Default is "open".
+ State string `url:"state,omitempty"`
+
+ // Head filters pull requests by head user and branch name in the format of:
+ // "user:ref-name".
+ Head string `url:"head,omitempty"`
+
+ // Base filters pull requests by base branch name.
+ Base string `url:"base,omitempty"`
+
+ // Sort specifies how to sort pull requests. Possible values are: created,
+ // updated, popularity, long-running. Default is "created".
+ Sort string `url:"sort,omitempty"`
+
+ // Direction in which to sort pull requests. Possible values are: asc, desc.
+ // If Sort is "created" or not specified, the default is "desc"; otherwise the
+ // default is "asc".
+ Direction string `url:"direction,omitempty"`
+
+ ListOptions
+}
+
+// List the pull requests for the specified repository.
+//
+// GitHub API docs: http://developer.github.com/v3/pulls/#list-pull-requests
+func (s *PullRequestsService) List(owner string, repo string, opt *PullRequestListOptions) ([]*PullRequest, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pulls := new([]*PullRequest)
+ resp, err := s.client.Do(req, pulls)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *pulls, resp, err
+}
+
+// Get a single pull request.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/#get-a-single-pull-request
+func (s *PullRequestsService) Get(owner string, repo string, number int) (*PullRequest, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pull := new(PullRequest)
+ resp, err := s.client.Do(req, pull)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return pull, resp, err
+}
+
+// GetRaw gets raw (diff or patch) format of a pull request.
+func (s *PullRequestsService) GetRaw(owner string, repo string, number int, opt RawOptions) (string, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return "", nil, err
+ }
+
+ switch opt.Type {
+ case Diff:
+ req.Header.Set("Accept", mediaTypeV3Diff)
+ case Patch:
+ req.Header.Set("Accept", mediaTypeV3Patch)
+ default:
+ return "", nil, fmt.Errorf("unsupported raw type %d", opt.Type)
+ }
+
+ ret := new(bytes.Buffer)
+ resp, err := s.client.Do(req, ret)
+ if err != nil {
+ return "", resp, err
+ }
+
+ return ret.String(), resp, err
+}
+
+// NewPullRequest represents a new pull request to be created.
+type NewPullRequest struct {
+ Title *string `json:"title,omitempty"`
+ Head *string `json:"head,omitempty"`
+ Base *string `json:"base,omitempty"`
+ Body *string `json:"body,omitempty"`
+ Issue *int `json:"issue,omitempty"`
+}
+
+// Create a new pull request on the specified repository.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/#create-a-pull-request
+func (s *PullRequestsService) Create(owner string, repo string, pull *NewPullRequest) (*PullRequest, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo)
+ req, err := s.client.NewRequest("POST", u, pull)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ p := new(PullRequest)
+ resp, err := s.client.Do(req, p)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return p, resp, err
+}
+
+// Edit a pull request.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/#update-a-pull-request
+func (s *PullRequestsService) Edit(owner string, repo string, number int, pull *PullRequest) (*PullRequest, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number)
+ req, err := s.client.NewRequest("PATCH", u, pull)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ p := new(PullRequest)
+ resp, err := s.client.Do(req, p)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return p, resp, err
+}
+
+// ListCommits lists the commits in a pull request.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/#list-commits-on-a-pull-request
+func (s *PullRequestsService) ListCommits(owner string, repo string, number int, opt *ListOptions) ([]*RepositoryCommit, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/%d/commits", owner, repo, number)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ commits := new([]*RepositoryCommit)
+ resp, err := s.client.Do(req, commits)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *commits, resp, err
+}
+
+// ListFiles lists the files in a pull request.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/#list-pull-requests-files
+func (s *PullRequestsService) ListFiles(owner string, repo string, number int, opt *ListOptions) ([]*CommitFile, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/%d/files", owner, repo, number)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ commitFiles := new([]*CommitFile)
+ resp, err := s.client.Do(req, commitFiles)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *commitFiles, resp, err
+}
+
+// IsMerged checks if a pull request has been merged.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/#get-if-a-pull-request-has-been-merged
+func (s *PullRequestsService) IsMerged(owner string, repo string, number int) (bool, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return false, nil, err
+ }
+
+ resp, err := s.client.Do(req, nil)
+ merged, err := parseBoolResponse(err)
+ return merged, resp, err
+}
+
+// PullRequestMergeResult represents the result of merging a pull request.
+type PullRequestMergeResult struct {
+ SHA *string `json:"sha,omitempty"`
+ Merged *bool `json:"merged,omitempty"`
+ Message *string `json:"message,omitempty"`
+}
+
+// PullRequestOptions lets you define how a pull request will be merged.
+type PullRequestOptions struct {
+ CommitTitle string // Title for the automatic commit message. (Optional.)
+ SHA string // SHA that pull request head must match to allow merge. (Optional.)
+
+ // The merge method to use. Possible values include: "merge", "squash", and "rebase", with the default being "merge". (Optional.)
+ MergeMethod string
+}
+
+type pullRequestMergeRequest struct {
+ CommitMessage string `json:"commit_message"`
+ CommitTitle string `json:"commit_title,omitempty"`
+ MergeMethod string `json:"merge_method,omitempty"`
+ SHA string `json:"sha,omitempty"`
+}
+
+// Merge a pull request (Merge Button™).
+ // commitMessage is extra detail to append to the automatic commit message.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/#merge-a-pull-request-merge-buttontrade
+func (s *PullRequestsService) Merge(owner string, repo string, number int, commitMessage string, options *PullRequestOptions) (*PullRequestMergeResult, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number)
+
+ pullRequestBody := &pullRequestMergeRequest{CommitMessage: commitMessage}
+ if options != nil {
+ pullRequestBody.CommitTitle = options.CommitTitle
+ pullRequestBody.MergeMethod = options.MergeMethod
+ pullRequestBody.SHA = options.SHA
+ }
+ req, err := s.client.NewRequest("PUT", u, pullRequestBody)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: This header will be unnecessary when the API is no longer in preview.
+ req.Header.Set("Accept", mediaTypeSquashPreview)
+
+ mergeResult := new(PullRequestMergeResult)
+ resp, err := s.client.Do(req, mergeResult)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return mergeResult, resp, err
+}
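A short usage sketch for the listing and merge methods above. The owner, repo, pull number, and commit text are placeholders, and it assumes an authenticated *github.Client whose PullRequests field points at this service.

```go
package example

import (
	"fmt"

	"github.com/google/go-github/github"
)

// squashMergePull lists open pull requests, prints them, and squash-merges the
// given one. owner, repo, and number are caller-supplied placeholders.
func squashMergePull(client *github.Client, owner, repo string, number int) error {
	opt := &github.PullRequestListOptions{
		State:       "open",
		ListOptions: github.ListOptions{PerPage: 10},
	}
	pulls, _, err := client.PullRequests.List(owner, repo, opt)
	if err != nil {
		return err
	}
	for _, pr := range pulls {
		fmt.Printf("#%d %s\n", *pr.Number, *pr.Title)
	}

	// The commitMessage argument becomes extra detail in the merge commit body;
	// the title and merge method are set through PullRequestOptions.
	result, _, err := client.PullRequests.Merge(owner, repo, number, "details for the commit body",
		&github.PullRequestOptions{CommitTitle: "Merge pull request", MergeMethod: "squash"})
	if err != nil {
		return err
	}
	if result.Merged != nil && *result.Merged {
		fmt.Println("merged as", *result.SHA)
	}
	return nil
}
```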
diff --git a/vendor/github.com/google/go-github/github/pulls_comments.go b/vendor/github.com/google/go-github/github/pulls_comments.go
new file mode 100644
index 0000000..c7af85a
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/pulls_comments.go
@@ -0,0 +1,156 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// PullRequestComment represents a comment left on a pull request.
+type PullRequestComment struct {
+ ID *int `json:"id,omitempty"`
+ InReplyTo *int `json:"in_reply_to,omitempty"`
+ Body *string `json:"body,omitempty"`
+ Path *string `json:"path,omitempty"`
+ DiffHunk *string `json:"diff_hunk,omitempty"`
+ Position *int `json:"position,omitempty"`
+ OriginalPosition *int `json:"original_position,omitempty"`
+ CommitID *string `json:"commit_id,omitempty"`
+ OriginalCommitID *string `json:"original_commit_id,omitempty"`
+ User *User `json:"user,omitempty"`
+ Reactions *Reactions `json:"reactions,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ PullRequestURL *string `json:"pull_request_url,omitempty"`
+}
+
+func (p PullRequestComment) String() string {
+ return Stringify(p)
+}
+
+// PullRequestListCommentsOptions specifies the optional parameters to the
+// PullRequestsService.ListComments method.
+type PullRequestListCommentsOptions struct {
+ // Sort specifies how to sort comments. Possible values are: created, updated.
+ Sort string `url:"sort,omitempty"`
+
+ // Direction in which to sort comments. Possible values are: asc, desc.
+ Direction string `url:"direction,omitempty"`
+
+ // Since filters comments by time.
+ Since time.Time `url:"since,omitempty"`
+
+ ListOptions
+}
+
+// ListComments lists all comments on the specified pull request. Specifying a
+// pull request number of 0 will return all comments on all pull requests for
+// the repository.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/comments/#list-comments-on-a-pull-request
+func (s *PullRequestsService) ListComments(owner string, repo string, number int, opt *PullRequestListCommentsOptions) ([]*PullRequestComment, *Response, error) {
+ var u string
+ if number == 0 {
+ u = fmt.Sprintf("repos/%v/%v/pulls/comments", owner, repo)
+ } else {
+ u = fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number)
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ comments := new([]*PullRequestComment)
+ resp, err := s.client.Do(req, comments)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *comments, resp, err
+}
+
+// GetComment fetches the specified pull request comment.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/comments/#get-a-single-comment
+func (s *PullRequestsService) GetComment(owner string, repo string, number int) (*PullRequestComment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, number)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ comment := new(PullRequestComment)
+ resp, err := s.client.Do(req, comment)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return comment, resp, err
+}
+
+// CreateComment creates a new comment on the specified pull request.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/comments/#create-a-comment
+func (s *PullRequestsService) CreateComment(owner string, repo string, number int, comment *PullRequestComment) (*PullRequestComment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number)
+ req, err := s.client.NewRequest("POST", u, comment)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c := new(PullRequestComment)
+ resp, err := s.client.Do(req, c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
+
+// EditComment updates a pull request comment.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/comments/#edit-a-comment
+func (s *PullRequestsService) EditComment(owner string, repo string, number int, comment *PullRequestComment) (*PullRequestComment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, number)
+ req, err := s.client.NewRequest("PATCH", u, comment)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c := new(PullRequestComment)
+ resp, err := s.client.Do(req, c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
+
+// DeleteComment deletes a pull request comment.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/comments/#delete-a-comment
+func (s *PullRequestsService) DeleteComment(owner string, repo string, number int) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, number)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
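A hedged sketch of the comment methods above: it pins a review comment to a diff position and then lists the pull request's comments. The commit SHA, file path, and position are placeholders that would have to match the real diff.

```go
package example

import (
	"fmt"

	"github.com/google/go-github/github"
)

// commentOnDiff leaves a review comment on one line of a pull request diff and
// then lists every comment on that pull request.
func commentOnDiff(client *github.Client, owner, repo string, number int) error {
	comment := &github.PullRequestComment{
		Body:     github.String("Consider handling the error here."),
		CommitID: github.String("6dcb09b5b57875f334f61aebed695e2e4193db5e"), // placeholder SHA
		Path:     github.String("main.go"),
		Position: github.Int(5), // position in the unified diff, not the file line number
	}
	if _, _, err := client.PullRequests.CreateComment(owner, repo, number, comment); err != nil {
		return err
	}

	comments, _, err := client.PullRequests.ListComments(owner, repo, number,
		&github.PullRequestListCommentsOptions{Sort: "created", Direction: "asc"})
	if err != nil {
		return err
	}
	for _, c := range comments {
		fmt.Printf("%s: %s\n", *c.User.Login, *c.Body)
	}
	return nil
}
```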
diff --git a/vendor/github.com/google/go-github/github/pulls_reviews.go b/vendor/github.com/google/go-github/github/pulls_reviews.go
new file mode 100644
index 0000000..ae3cdd4
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/pulls_reviews.go
@@ -0,0 +1,19 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "time"
+
+// PullRequestReview represents a review of a pull request.
+type PullRequestReview struct {
+ ID *int `json:"id,omitempty"`
+ User *User `json:"user,omitempty"`
+ Body *string `json:"body,omitempty"`
+ SubmittedAt *time.Time `json:"submitted_at,omitempty"`
+
+ // State can be "approved", "rejected", or "commented".
+ State *string `json:"state,omitempty"`
+}
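PullRequestReview is only a data type here (this file defines no service methods), so the most it supports directly is decoding. A minimal sketch, assuming the review JSON comes from somewhere like a webhook payload:

```go
package example

import (
	"encoding/json"
	"fmt"

	"github.com/google/go-github/github"
)

// reviewStateFromJSON decodes a review object into PullRequestReview and
// reports its state. The payload is whatever JSON the caller obtained; this is
// an illustration, not a complete API response.
func reviewStateFromJSON(raw []byte) (string, error) {
	var review github.PullRequestReview
	if err := json.Unmarshal(raw, &review); err != nil {
		return "", err
	}
	if review.State == nil {
		return "", fmt.Errorf("review has no state")
	}
	return *review.State, nil
}
```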
diff --git a/vendor/github.com/google/go-github/github/reactions.go b/vendor/github.com/google/go-github/github/reactions.go
new file mode 100644
index 0000000..03b131b
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/reactions.go
@@ -0,0 +1,270 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// ReactionsService provides access to the reactions-related functions in the
+// GitHub API.
+//
+// GitHub API docs: https://developer.github.com/v3/reactions/
+type ReactionsService service
+
+// Reaction represents a GitHub reaction.
+type Reaction struct {
+ // ID is the Reaction ID.
+ ID *int `json:"id,omitempty"`
+ User *User `json:"user,omitempty"`
+ // Content is the type of reaction.
+ // Possible values are:
+ // "+1", "-1", "laugh", "confused", "heart", "hooray".
+ Content *string `json:"content,omitempty"`
+}
+
+// Reactions represents a summary of GitHub reactions.
+type Reactions struct {
+ TotalCount *int `json:"total_count,omitempty"`
+ PlusOne *int `json:"+1,omitempty"`
+ MinusOne *int `json:"-1,omitempty"`
+ Laugh *int `json:"laugh,omitempty"`
+ Confused *int `json:"confused,omitempty"`
+ Heart *int `json:"heart,omitempty"`
+ Hooray *int `json:"hooray,omitempty"`
+ URL *string `json:"url,omitempty"`
+}
+
+func (r Reaction) String() string {
+ return Stringify(r)
+}
+
+// ListCommentReactions lists the reactions for a commit comment.
+//
+// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-a-commit-comment
+func (s *ReactionsService) ListCommentReactions(owner, repo string, id int, opt *ListOptions) ([]*Reaction, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions", owner, repo, id)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ var m []*Reaction
+ resp, err := s.client.Do(req, &m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, nil
+}
+
+// CreateCommentReaction creates a reaction for a commit comment.
+// Note that if you have already created a reaction of type content, the
+// previously created reaction will be returned with Status: 200 OK.
+//
+// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-a-commit-comment
+func (s ReactionsService) CreateCommentReaction(owner, repo string, id int, content string) (*Reaction, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions", owner, repo, id)
+
+ body := &Reaction{Content: String(content)}
+ req, err := s.client.NewRequest("POST", u, body)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ m := &Reaction{}
+ resp, err := s.client.Do(req, m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, nil
+}
+
+// ListIssueReactions lists the reactions for an issue.
+//
+// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-an-issue
+func (s *ReactionsService) ListIssueReactions(owner, repo string, number int, opt *ListOptions) ([]*Reaction, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%v/reactions", owner, repo, number)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ var m []*Reaction
+ resp, err := s.client.Do(req, &m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, nil
+}
+
+// CreateIssueReaction creates a reaction for an issue.
+// Note that if you have already created a reaction of type content, the
+// previously created reaction will be returned with Status: 200 OK.
+//
+// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-an-issue
+func (s ReactionsService) CreateIssueReaction(owner, repo string, number int, content string) (*Reaction, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/%v/reactions", owner, repo, number)
+
+ body := &Reaction{Content: String(content)}
+ req, err := s.client.NewRequest("POST", u, body)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ m := &Reaction{}
+ resp, err := s.client.Do(req, m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, nil
+}
+
+// ListIssueCommentReactions lists the reactions for an issue comment.
+//
+// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-an-issue-comment
+func (s *ReactionsService) ListIssueCommentReactions(owner, repo string, id int, opt *ListOptions) ([]*Reaction, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions", owner, repo, id)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ var m []*Reaction
+ resp, err := s.client.Do(req, &m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, nil
+}
+
+// CreateIssueCommentReaction creates a reaction for an issue comment.
+// Note that if you have already created a reaction of type content, the
+// previously created reaction will be returned with Status: 200 OK.
+//
+// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-an-issue-comment
+func (s ReactionsService) CreateIssueCommentReaction(owner, repo string, id int, content string) (*Reaction, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions", owner, repo, id)
+
+ body := &Reaction{Content: String(content)}
+ req, err := s.client.NewRequest("POST", u, body)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ m := &Reaction{}
+ resp, err := s.client.Do(req, m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, nil
+}
+
+// ListPullRequestCommentReactions lists the reactions for a pull request review comment.
+//
+ // GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-a-pull-request-review-comment
+func (s *ReactionsService) ListPullRequestCommentReactions(owner, repo string, id int, opt *ListOptions) ([]*Reaction, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions", owner, repo, id)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ var m []*Reaction
+ resp, err := s.client.Do(req, &m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, nil
+}
+
+// CreatePullRequestCommentReaction creates a reaction for a pull request review comment.
+// Note that if you have already created a reaction of type content, the
+// previously created reaction will be returned with Status: 200 OK.
+//
+ // GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-a-pull-request-review-comment
+func (s ReactionsService) CreatePullRequestCommentReaction(owner, repo string, id int, content string) (*Reaction, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions", owner, repo, id)
+
+ body := &Reaction{Content: String(content)}
+ req, err := s.client.NewRequest("POST", u, body)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ m := &Reaction{}
+ resp, err := s.client.Do(req, m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, nil
+}
+
+// DeleteReaction deletes a reaction.
+//
+ // GitHub API docs: https://developer.github.com/v3/reactions/#delete-a-reaction
+func (s *ReactionsService) DeleteReaction(id int) (*Response, error) {
+ u := fmt.Sprintf("reactions/%v", id)
+
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ return s.client.Do(req, nil)
+}
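A brief usage sketch of the issue-reaction methods above; the other List*/Create* pairs in this file follow the same shape. Owner, repo, and issue number are placeholders, and the Reactions service is assumed to hang off the client as client.Reactions.

```go
package example

import (
	"fmt"

	"github.com/google/go-github/github"
)

// reactToIssue adds a "+1" reaction to an issue and then prints the existing reactions.
func reactToIssue(client *github.Client, owner, repo string, number int) error {
	// Creating the same reaction twice is safe: the API returns the existing
	// reaction with a 200 status instead of creating a duplicate.
	if _, _, err := client.Reactions.CreateIssueReaction(owner, repo, number, "+1"); err != nil {
		return err
	}

	reactions, _, err := client.Reactions.ListIssueReactions(owner, repo, number, &github.ListOptions{PerPage: 50})
	if err != nil {
		return err
	}
	for _, r := range reactions {
		fmt.Printf("%s by %s\n", *r.Content, *r.User.Login)
	}
	return nil
}
```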
diff --git a/vendor/github.com/google/go-github/github/repos.go b/vendor/github.com/google/go-github/github/repos.go
new file mode 100644
index 0000000..551e9ea
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos.go
@@ -0,0 +1,686 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "strings"
+)
+
+// RepositoriesService handles communication with the repository related
+// methods of the GitHub API.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/
+type RepositoriesService service
+
+// Repository represents a GitHub repository.
+type Repository struct {
+ ID *int `json:"id,omitempty"`
+ Owner *User `json:"owner,omitempty"`
+ Name *string `json:"name,omitempty"`
+ FullName *string `json:"full_name,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Homepage *string `json:"homepage,omitempty"`
+ DefaultBranch *string `json:"default_branch,omitempty"`
+ MasterBranch *string `json:"master_branch,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ PushedAt *Timestamp `json:"pushed_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ CloneURL *string `json:"clone_url,omitempty"`
+ GitURL *string `json:"git_url,omitempty"`
+ MirrorURL *string `json:"mirror_url,omitempty"`
+ SSHURL *string `json:"ssh_url,omitempty"`
+ SVNURL *string `json:"svn_url,omitempty"`
+ Language *string `json:"language,omitempty"`
+ Fork *bool `json:"fork"`
+ ForksCount *int `json:"forks_count,omitempty"`
+ NetworkCount *int `json:"network_count,omitempty"`
+ OpenIssuesCount *int `json:"open_issues_count,omitempty"`
+ StargazersCount *int `json:"stargazers_count,omitempty"`
+ SubscribersCount *int `json:"subscribers_count,omitempty"`
+ WatchersCount *int `json:"watchers_count,omitempty"`
+ Size *int `json:"size,omitempty"`
+ AutoInit *bool `json:"auto_init,omitempty"`
+ Parent *Repository `json:"parent,omitempty"`
+ Source *Repository `json:"source,omitempty"`
+ Organization *Organization `json:"organization,omitempty"`
+ Permissions *map[string]bool `json:"permissions,omitempty"`
+ AllowRebaseMerge *bool `json:"allow_rebase_merge,omitempty"`
+ AllowSquashMerge *bool `json:"allow_squash_merge,omitempty"`
+ AllowMergeCommit *bool `json:"allow_merge_commit,omitempty"`
+
+ // Only provided when using RepositoriesService.Get while in preview
+ License *License `json:"license,omitempty"`
+
+ // Additional mutable fields when creating and editing a repository
+ Private *bool `json:"private"`
+ HasIssues *bool `json:"has_issues"`
+ HasWiki *bool `json:"has_wiki"`
+ HasPages *bool `json:"has_pages"`
+ HasDownloads *bool `json:"has_downloads"`
+ LicenseTemplate *string `json:"license_template,omitempty"`
+ GitignoreTemplate *string `json:"gitignore_template,omitempty"`
+
+ // TeamID is only used when creating an organization repository. Required for non-owners.
+ TeamID *int `json:"team_id"`
+
+ // API URLs
+ URL *string `json:"url,omitempty"`
+ ArchiveURL *string `json:"archive_url,omitempty"`
+ AssigneesURL *string `json:"assignees_url,omitempty"`
+ BlobsURL *string `json:"blobs_url,omitempty"`
+ BranchesURL *string `json:"branches_url,omitempty"`
+ CollaboratorsURL *string `json:"collaborators_url,omitempty"`
+ CommentsURL *string `json:"comments_url,omitempty"`
+ CommitsURL *string `json:"commits_url,omitempty"`
+ CompareURL *string `json:"compare_url,omitempty"`
+ ContentsURL *string `json:"contents_url,omitempty"`
+ ContributorsURL *string `json:"contributors_url,omitempty"`
+ DeploymentsURL *string `json:"deployments_url,omitempty"`
+ DownloadsURL *string `json:"downloads_url,omitempty"`
+ EventsURL *string `json:"events_url,omitempty"`
+ ForksURL *string `json:"forks_url,omitempty"`
+ GitCommitsURL *string `json:"git_commits_url,omitempty"`
+ GitRefsURL *string `json:"git_refs_url,omitempty"`
+ GitTagsURL *string `json:"git_tags_url,omitempty"`
+ HooksURL *string `json:"hooks_url,omitempty"`
+ IssueCommentURL *string `json:"issue_comment_url,omitempty"`
+ IssueEventsURL *string `json:"issue_events_url,omitempty"`
+ IssuesURL *string `json:"issues_url,omitempty"`
+ KeysURL *string `json:"keys_url,omitempty"`
+ LabelsURL *string `json:"labels_url,omitempty"`
+ LanguagesURL *string `json:"languages_url,omitempty"`
+ MergesURL *string `json:"merges_url,omitempty"`
+ MilestonesURL *string `json:"milestones_url,omitempty"`
+ NotificationsURL *string `json:"notifications_url,omitempty"`
+ PullsURL *string `json:"pulls_url,omitempty"`
+ ReleasesURL *string `json:"releases_url,omitempty"`
+ StargazersURL *string `json:"stargazers_url,omitempty"`
+ StatusesURL *string `json:"statuses_url,omitempty"`
+ SubscribersURL *string `json:"subscribers_url,omitempty"`
+ SubscriptionURL *string `json:"subscription_url,omitempty"`
+ TagsURL *string `json:"tags_url,omitempty"`
+ TreesURL *string `json:"trees_url,omitempty"`
+ TeamsURL *string `json:"teams_url,omitempty"`
+
+ // TextMatches is only populated from search results that request text matches
+ // See: search.go and https://developer.github.com/v3/search/#text-match-metadata
+ TextMatches []TextMatch `json:"text_matches,omitempty"`
+}
+
+func (r Repository) String() string {
+ return Stringify(r)
+}
+
+// RepositoryListOptions specifies the optional parameters to the
+// RepositoriesService.List method.
+type RepositoryListOptions struct {
+ // Visibility of repositories to list. Can be one of all, public, or private.
+ // Default: all
+ Visibility string `url:"visibility,omitempty"`
+
+ // List repos of given affiliation[s].
+ // Comma-separated list of values. Can include:
+ // * owner: Repositories that are owned by the authenticated user.
+ // * collaborator: Repositories that the user has been added to as a
+ // collaborator.
+ // * organization_member: Repositories that the user has access to through
+ // being a member of an organization. This includes every repository on
+ // every team that the user is on.
+ // Default: owner,collaborator,organization_member
+ Affiliation string `url:"affiliation,omitempty"`
+
+ // Type of repositories to list.
+ // Can be one of all, owner, public, private, member. Default: all
+ // Will cause a 422 error if used in the same request as visibility or
+ // affiliation.
+ Type string `url:"type,omitempty"`
+
+ // How to sort the repository list. Can be one of created, updated, pushed,
+ // full_name. Default: full_name
+ Sort string `url:"sort,omitempty"`
+
+ // Direction in which to sort repositories. Can be one of asc or desc.
+ // Default: when using full_name: asc; otherwise desc
+ Direction string `url:"direction,omitempty"`
+
+ ListOptions
+}
+
+// List the repositories for a user. Passing the empty string will list
+// repositories for the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/#list-user-repositories
+func (s *RepositoriesService) List(user string, opt *RepositoryListOptions) ([]*Repository, *Response, error) {
+ var u string
+ if user != "" {
+ u = fmt.Sprintf("users/%v/repos", user)
+ } else {
+ u = "user/repos"
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when license support fully launches
+ req.Header.Set("Accept", mediaTypeLicensesPreview)
+
+ repos := new([]*Repository)
+ resp, err := s.client.Do(req, repos)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *repos, resp, err
+}
+
+// RepositoryListByOrgOptions specifies the optional parameters to the
+// RepositoriesService.ListByOrg method.
+type RepositoryListByOrgOptions struct {
+ // Type of repositories to list. Possible values are: all, public, private,
+ // forks, sources, member. Default is "all".
+ Type string `url:"type,omitempty"`
+
+ ListOptions
+}
+
+// ListByOrg lists the repositories for an organization.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/#list-organization-repositories
+func (s *RepositoriesService) ListByOrg(org string, opt *RepositoryListByOrgOptions) ([]*Repository, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/repos", org)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when license support fully launches
+ req.Header.Set("Accept", mediaTypeLicensesPreview)
+
+ repos := new([]*Repository)
+ resp, err := s.client.Do(req, repos)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *repos, resp, err
+}
+
+// RepositoryListAllOptions specifies the optional parameters to the
+// RepositoriesService.ListAll method.
+type RepositoryListAllOptions struct {
+ // ID of the last repository seen
+ Since int `url:"since,omitempty"`
+
+ ListOptions
+}
+
+// ListAll lists all GitHub repositories in the order that they were created.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/#list-all-public-repositories
+func (s *RepositoriesService) ListAll(opt *RepositoryListAllOptions) ([]*Repository, *Response, error) {
+ u, err := addOptions("repositories", opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ repos := new([]*Repository)
+ resp, err := s.client.Do(req, repos)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *repos, resp, err
+}
+
+// Create a new repository. If an organization is specified, the new
+// repository will be created under that org. If the empty string is
+// specified, it will be created for the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/#create
+func (s *RepositoriesService) Create(org string, repo *Repository) (*Repository, *Response, error) {
+ var u string
+ if org != "" {
+ u = fmt.Sprintf("orgs/%v/repos", org)
+ } else {
+ u = "user/repos"
+ }
+
+ req, err := s.client.NewRequest("POST", u, repo)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ r := new(Repository)
+ resp, err := s.client.Do(req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r, resp, err
+}
+
+// Get fetches a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/#get
+func (s *RepositoriesService) Get(owner, repo string) (*Repository, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when the license support fully launches
+ // https://developer.github.com/v3/licenses/#get-a-repositorys-license
+ acceptHeaders := []string{mediaTypeLicensesPreview, mediaTypeSquashPreview}
+ req.Header.Set("Accept", strings.Join(acceptHeaders, ", "))
+
+ repository := new(Repository)
+ resp, err := s.client.Do(req, repository)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return repository, resp, err
+}
+
+// GetByID fetches a repository.
+//
+// Note: GetByID uses the undocumented GitHub API endpoint /repositories/:id.
+func (s *RepositoriesService) GetByID(id int) (*Repository, *Response, error) {
+ u := fmt.Sprintf("repositories/%d", id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when the license support fully launches
+ // https://developer.github.com/v3/licenses/#get-a-repositorys-license
+ req.Header.Set("Accept", mediaTypeLicensesPreview)
+
+ repository := new(Repository)
+ resp, err := s.client.Do(req, repository)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return repository, resp, err
+}
+
+// Edit updates a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/#edit
+func (s *RepositoriesService) Edit(owner, repo string, repository *Repository) (*Repository, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v", owner, repo)
+ req, err := s.client.NewRequest("PATCH", u, repository)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: Remove this preview header after API is fully vetted.
+ req.Header.Add("Accept", mediaTypeSquashPreview)
+
+ r := new(Repository)
+ resp, err := s.client.Do(req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r, resp, err
+}
+
+// Delete a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/#delete-a-repository
+func (s *RepositoriesService) Delete(owner, repo string) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v", owner, repo)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// Contributor represents a repository contributor
+type Contributor struct {
+ Login *string `json:"login,omitempty"`
+ ID *int `json:"id,omitempty"`
+ AvatarURL *string `json:"avatar_url,omitempty"`
+ GravatarID *string `json:"gravatar_id,omitempty"`
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ FollowersURL *string `json:"followers_url,omitempty"`
+ FollowingURL *string `json:"following_url,omitempty"`
+ GistsURL *string `json:"gists_url,omitempty"`
+ StarredURL *string `json:"starred_url,omitempty"`
+ SubscriptionsURL *string `json:"subscriptions_url,omitempty"`
+ OrganizationsURL *string `json:"organizations_url,omitempty"`
+ ReposURL *string `json:"repos_url,omitempty"`
+ EventsURL *string `json:"events_url,omitempty"`
+ ReceivedEventsURL *string `json:"received_events_url,omitempty"`
+ Type *string `json:"type,omitempty"`
+ SiteAdmin *bool `json:"site_admin"`
+ Contributions *int `json:"contributions,omitempty"`
+}
+
+// ListContributorsOptions specifies the optional parameters to the
+// RepositoriesService.ListContributors method.
+type ListContributorsOptions struct {
+ // Include anonymous contributors in results or not
+ Anon string `url:"anon,omitempty"`
+
+ ListOptions
+}
+
+// ListContributors lists contributors for a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/#list-contributors
+func (s *RepositoriesService) ListContributors(owner string, repository string, opt *ListContributorsOptions) ([]*Contributor, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/contributors", owner, repository)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ contributor := new([]*Contributor)
+ resp, err := s.client.Do(req, contributor)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *contributor, resp, err
+}
+
+// ListLanguages lists languages for the specified repository. The returned map
+// specifies the languages and the number of bytes of code written in that
+// language. For example:
+//
+// {
+// "C": 78769,
+// "Python": 7769
+// }
+//
+// GitHub API Docs: http://developer.github.com/v3/repos/#list-languages
+func (s *RepositoriesService) ListLanguages(owner string, repo string) (map[string]int, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/languages", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ languages := make(map[string]int)
+ resp, err := s.client.Do(req, &languages)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return languages, resp, err
+}
+
+// ListTeams lists the teams for the specified repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/#list-teams
+func (s *RepositoriesService) ListTeams(owner string, repo string, opt *ListOptions) ([]*Team, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/teams", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ teams := new([]*Team)
+ resp, err := s.client.Do(req, teams)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *teams, resp, err
+}
+
+// RepositoryTag represents a repository tag.
+type RepositoryTag struct {
+ Name *string `json:"name,omitempty"`
+ Commit *Commit `json:"commit,omitempty"`
+ ZipballURL *string `json:"zipball_url,omitempty"`
+ TarballURL *string `json:"tarball_url,omitempty"`
+}
+
+// ListTags lists tags for the specified repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/#list-tags
+func (s *RepositoriesService) ListTags(owner string, repo string, opt *ListOptions) ([]*RepositoryTag, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/tags", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ tags := new([]*RepositoryTag)
+ resp, err := s.client.Do(req, tags)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *tags, resp, err
+}
+
+// Branch represents a repository branch
+type Branch struct {
+ Name *string `json:"name,omitempty"`
+ Commit *RepositoryCommit `json:"commit,omitempty"`
+ Protected *bool `json:"protected,omitempty"`
+}
+
+// Protection represents a repository branch's protection.
+type Protection struct {
+ RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"`
+ RequiredPullRequestReviews *RequiredPullRequestReviews `json:"required_pull_request_reviews"`
+ Restrictions *BranchRestrictions `json:"restrictions"`
+}
+
+// ProtectionRequest represents a request to create/edit a branch's protection.
+type ProtectionRequest struct {
+ RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"`
+ RequiredPullRequestReviews *RequiredPullRequestReviews `json:"required_pull_request_reviews"`
+ Restrictions *BranchRestrictionsRequest `json:"restrictions"`
+}
+
+ // RequiredStatusChecks represents the protection status of an individual branch.
+type RequiredStatusChecks struct {
+ // Enforce required status checks for repository administrators. (Required.)
+ IncludeAdmins bool `json:"include_admins"`
+ // Require branches to be up to date before merging. (Required.)
+ Strict bool `json:"strict"`
+ // The list of status checks to require in order to merge into this
+ // branch. (Required; use []string{} instead of nil for empty list.)
+ Contexts []string `json:"contexts"`
+}
+
+// RequiredPullRequestReviews represents the protection configuration for pull requests.
+type RequiredPullRequestReviews struct {
+ // Enforce pull request reviews for repository administrators. (Required.)
+ IncludeAdmins bool `json:"include_admins"`
+}
+
+// BranchRestrictions represents the restriction that only certain users or
+// teams may push to a branch.
+type BranchRestrictions struct {
+ // The list of user logins with push access.
+ Users []*User `json:"users"`
+ // The list of team slugs with push access.
+ Teams []*Team `json:"teams"`
+}
+
+// BranchRestrictionsRequest represents the request to create/edit the
+// restriction that only certain users or teams may push to a branch. It is
+// separate from BranchRestrictions above because the request structure is
+// different from the response structure.
+type BranchRestrictionsRequest struct {
+ // The list of user logins with push access. (Required; use []string{} instead of nil for empty list.)
+ Users []string `json:"users"`
+ // The list of team slugs with push access. (Required; use []string{} instead of nil for empty list.)
+ Teams []string `json:"teams"`
+}
+
+// ListBranches lists branches for the specified repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/#list-branches
+func (s *RepositoriesService) ListBranches(owner string, repo string, opt *ListOptions) ([]*Branch, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ branches := new([]*Branch)
+ resp, err := s.client.Do(req, branches)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *branches, resp, err
+}
+
+// GetBranch gets the specified branch for a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/#get-branch
+func (s *RepositoriesService) GetBranch(owner, repo, branch string) (*Branch, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v", owner, repo, branch)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ b := new(Branch)
+ resp, err := s.client.Do(req, b)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return b, resp, err
+}
+
+// GetBranchProtection gets the protection of a given branch.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-branch-protection
+func (s *RepositoriesService) GetBranchProtection(owner, repo, branch string) (*Protection, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ p := new(Protection)
+ resp, err := s.client.Do(req, p)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return p, resp, err
+}
+
+// UpdateBranchProtection updates the protection of a given branch.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#update-branch-protection
+func (s *RepositoriesService) UpdateBranchProtection(owner, repo, branch string, preq *ProtectionRequest) (*Protection, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch)
+ req, err := s.client.NewRequest("PUT", u, preq)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ p := new(Protection)
+ resp, err := s.client.Do(req, p)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return p, resp, err
+}
+
+// RemoveBranchProtection removes the protection of a given branch.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#remove-branch-protection
+func (s *RepositoriesService) RemoveBranchProtection(owner, repo, branch string) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ return s.client.Do(req, nil)
+}
+
+// License gets the contents of a repository's license if one is detected.
+//
+// GitHub API docs: https://developer.github.com/v3/licenses/#get-the-contents-of-a-repositorys-license
+func (s *RepositoriesService) License(owner, repo string) (*RepositoryLicense, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/license", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ r := &RepositoryLicense{}
+ resp, err := s.client.Do(req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r, resp, err
+}
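The branch-protection types above are the least obvious part of this file, particularly the requirement to send empty slices rather than nil. A hedged sketch of enabling protection on one branch follows; the status-check context and team slug are placeholders, and the client wiring (client.Repositories) is assumed from the rest of the package.

```go
package example

import (
	"fmt"

	"github.com/google/go-github/github"
)

// protectBranch enables required, up-to-date status checks and required pull
// request reviews on a branch, restricting pushes to a placeholder team.
func protectBranch(client *github.Client, owner, repo, branch string) error {
	preq := &github.ProtectionRequest{
		RequiredStatusChecks: &github.RequiredStatusChecks{
			IncludeAdmins: true,
			Strict:        true,
			Contexts:      []string{"continuous-integration/travis-ci"}, // placeholder context
		},
		RequiredPullRequestReviews: &github.RequiredPullRequestReviews{IncludeAdmins: true},
		Restrictions: &github.BranchRestrictionsRequest{
			Users: []string{},              // no individual users; must be []string{}, not nil
			Teams: []string{"maintainers"}, // placeholder team slug
		},
	}
	protection, _, err := client.Repositories.UpdateBranchProtection(owner, repo, branch, preq)
	if err != nil {
		return err
	}
	fmt.Printf("required contexts: %v\n", protection.RequiredStatusChecks.Contexts)
	return nil
}
```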
diff --git a/vendor/github.com/google/go-github/github/repos_collaborators.go b/vendor/github.com/google/go-github/github/repos_collaborators.go
new file mode 100644
index 0000000..68a9f46
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_collaborators.go
@@ -0,0 +1,92 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+ // ListCollaborators lists the GitHub users that have access to the repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/collaborators/#list
+func (s *RepositoriesService) ListCollaborators(owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/collaborators", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ users := new([]*User)
+ resp, err := s.client.Do(req, users)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *users, resp, err
+}
+
+ // IsCollaborator checks whether the specified GitHub user has collaborator
+// access to the given repo.
+// Note: This will return false if the user is not a collaborator OR the user
+// is not a GitHub user.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/collaborators/#get
+func (s *RepositoriesService) IsCollaborator(owner, repo, user string) (bool, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return false, nil, err
+ }
+
+ resp, err := s.client.Do(req, nil)
+ isCollab, err := parseBoolResponse(err)
+ return isCollab, resp, err
+}
+
+// RepositoryAddCollaboratorOptions specifies the optional parameters to the
+// RepositoriesService.AddCollaborator method.
+type RepositoryAddCollaboratorOptions struct {
+ // Permission specifies the permission to grant the user on this repository.
+ // Possible values are:
+ // pull - team members can pull, but not push to or administer this repository
+ // push - team members can pull and push, but not administer this repository
+ // admin - team members can pull, push and administer this repository
+ //
+ // Default value is "push". This option is only valid for organization-owned repositories.
+ Permission string `json:"permission,omitempty"`
+}
+
+ // AddCollaborator adds the specified GitHub user as a collaborator to the given repo.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#add-user-as-a-collaborator
+func (s *RepositoriesService) AddCollaborator(owner, repo, user string, opt *RepositoryAddCollaboratorOptions) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user)
+ req, err := s.client.NewRequest("PUT", u, opt)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
+
+ return s.client.Do(req, nil)
+}
+
+ // RemoveCollaborator removes the specified GitHub user as a collaborator from the given repo.
+// Note: Does not return error if a valid user that is not a collaborator is removed.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/collaborators/#remove-collaborator
+func (s *RepositoriesService) RemoveCollaborator(owner, repo, user string) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
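A small sketch combining the collaborator methods above. The permission option only affects organization-owned repositories, and because AddCollaborator goes through the repository invitations preview, IsCollaborator may report false until the invitation is accepted.

```go
package example

import (
	"fmt"

	"github.com/google/go-github/github"
)

// grantPushAccess invites a user as a collaborator with push permission and
// then checks collaborator status. owner, repo, and user are placeholders.
func grantPushAccess(client *github.Client, owner, repo, user string) error {
	opt := &github.RepositoryAddCollaboratorOptions{Permission: "push"}
	if _, err := client.Repositories.AddCollaborator(owner, repo, user, opt); err != nil {
		return err
	}

	// May remain false until the user accepts the invitation.
	ok, _, err := client.Repositories.IsCollaborator(owner, repo, user)
	if err != nil {
		return err
	}
	fmt.Printf("%s collaborator: %v\n", user, ok)
	return nil
}
```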
diff --git a/vendor/github.com/google/go-github/github/repos_comments.go b/vendor/github.com/google/go-github/github/repos_comments.go
new file mode 100644
index 0000000..34a8d02
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_comments.go
@@ -0,0 +1,160 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// RepositoryComment represents a comment for a commit, file, or line in a repository.
+type RepositoryComment struct {
+ HTMLURL *string `json:"html_url,omitempty"`
+ URL *string `json:"url,omitempty"`
+ ID *int `json:"id,omitempty"`
+ CommitID *string `json:"commit_id,omitempty"`
+ User *User `json:"user,omitempty"`
+ Reactions *Reactions `json:"reactions,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+
+ // User-mutable fields
+ Body *string `json:"body"`
+ // User-initialized fields
+ Path *string `json:"path,omitempty"`
+ Position *int `json:"position,omitempty"`
+}
+
+func (r RepositoryComment) String() string {
+ return Stringify(r)
+}
+
+// ListComments lists all the comments for the repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/comments/#list-commit-comments-for-a-repository
+func (s *RepositoriesService) ListComments(owner, repo string, opt *ListOptions) ([]*RepositoryComment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/comments", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ comments := new([]*RepositoryComment)
+ resp, err := s.client.Do(req, comments)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *comments, resp, err
+}
+
+// ListCommitComments lists all the comments for a given commit SHA.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/comments/#list-comments-for-a-single-commit
+func (s *RepositoriesService) ListCommitComments(owner, repo, sha string, opt *ListOptions) ([]*RepositoryComment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ comments := new([]*RepositoryComment)
+ resp, err := s.client.Do(req, comments)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *comments, resp, err
+}
+
+// CreateComment creates a comment for the given commit.
+// Note: GitHub allows for comments to be created for non-existing files and positions.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/comments/#create-a-commit-comment
+func (s *RepositoriesService) CreateComment(owner, repo, sha string, comment *RepositoryComment) (*RepositoryComment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha)
+ req, err := s.client.NewRequest("POST", u, comment)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c := new(RepositoryComment)
+ resp, err := s.client.Do(req, c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
+
+// GetComment gets a single comment from a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/comments/#get-a-single-commit-comment
+func (s *RepositoriesService) GetComment(owner, repo string, id int) (*RepositoryComment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeReactionsPreview)
+
+ c := new(RepositoryComment)
+ resp, err := s.client.Do(req, c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
+
+// UpdateComment updates the body of a single comment.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/comments/#update-a-commit-comment
+func (s *RepositoriesService) UpdateComment(owner, repo string, id int, comment *RepositoryComment) (*RepositoryComment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id)
+ req, err := s.client.NewRequest("PATCH", u, comment)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c := new(RepositoryComment)
+ resp, err := s.client.Do(req, c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
+
+// DeleteComment deletes a single comment from a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/comments/#delete-a-commit-comment
+func (s *RepositoriesService) DeleteComment(owner, repo string, id int) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
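As a minimal usage sketch (not part of the vendored file), the commit-comment methods above can be exercised as follows; the owner, repository, and commit SHA are placeholders, and creating a comment would require an authenticated *http.Client passed to github.NewClient:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	// nil uses http.DefaultClient; pass an authenticated client for writes.
	client := github.NewClient(nil)

	// Create a comment on a commit (placeholder owner/repo/SHA).
	comment := &github.RepositoryComment{Body: github.String("Nice change!")}
	created, _, err := client.Repositories.CreateComment("octocat", "Hello-World", "7638417db6d59f3c431d3e1f261cc637155684cd", comment)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created comment", *created.ID)

	// List every commit comment in the repository, following pagination.
	opt := &github.ListOptions{PerPage: 50}
	for {
		comments, resp, err := client.Repositories.ListComments("octocat", "Hello-World", opt)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("fetched %d comments\n", len(comments))
		if resp.NextPage == 0 {
			break
		}
		opt.Page = resp.NextPage
	}
}
```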
diff --git a/vendor/github.com/google/go-github/github/repos_commits.go b/vendor/github.com/google/go-github/github/repos_commits.go
new file mode 100644
index 0000000..22e8fca
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_commits.go
@@ -0,0 +1,202 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+)
+
+// RepositoryCommit represents a commit in a repo.
+// Note that it wraps a Commit, so author/committer information appears in two
+// places, but with different details: RepositoryCommit carries the GitHub details,
+// while Commit carries the git details.
+type RepositoryCommit struct {
+ SHA *string `json:"sha,omitempty"`
+ Commit *Commit `json:"commit,omitempty"`
+ Author *User `json:"author,omitempty"`
+ Committer *User `json:"committer,omitempty"`
+ Parents []Commit `json:"parents,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ URL *string `json:"url,omitempty"`
+ CommentsURL *string `json:"comments_url,omitempty"`
+
+ // Details about how many changes were made in this commit. Only filled in during GetCommit!
+ Stats *CommitStats `json:"stats,omitempty"`
+ // Details about which files this commit touched, and how. Only filled in during GetCommit!
+ Files []CommitFile `json:"files,omitempty"`
+}
+
+func (r RepositoryCommit) String() string {
+ return Stringify(r)
+}
+
+// CommitStats represents the number of additions / deletions from a file in a given RepositoryCommit or GistCommit.
+type CommitStats struct {
+ Additions *int `json:"additions,omitempty"`
+ Deletions *int `json:"deletions,omitempty"`
+ Total *int `json:"total,omitempty"`
+}
+
+func (c CommitStats) String() string {
+ return Stringify(c)
+}
+
+// CommitFile represents a file modified in a commit.
+type CommitFile struct {
+ SHA *string `json:"sha,omitempty"`
+ Filename *string `json:"filename,omitempty"`
+ Additions *int `json:"additions,omitempty"`
+ Deletions *int `json:"deletions,omitempty"`
+ Changes *int `json:"changes,omitempty"`
+ Status *string `json:"status,omitempty"`
+ Patch *string `json:"patch,omitempty"`
+ BlobURL *string `json:"blob_url,omitempty"`
+ RawURL *string `json:"raw_url,omitempty"`
+ ContentsURL *string `json:"contents_url,omitempty"`
+}
+
+func (c CommitFile) String() string {
+ return Stringify(c)
+}
+
+// CommitsComparison is the result of comparing two commits.
+// See CompareCommits() for details.
+type CommitsComparison struct {
+ BaseCommit *RepositoryCommit `json:"base_commit,omitempty"`
+ MergeBaseCommit *RepositoryCommit `json:"merge_base_commit,omitempty"`
+
+ // Head can be 'behind' or 'ahead'
+ Status *string `json:"status,omitempty"`
+ AheadBy *int `json:"ahead_by,omitempty"`
+ BehindBy *int `json:"behind_by,omitempty"`
+ TotalCommits *int `json:"total_commits,omitempty"`
+
+ Commits []RepositoryCommit `json:"commits,omitempty"`
+
+ Files []CommitFile `json:"files,omitempty"`
+}
+
+func (c CommitsComparison) String() string {
+ return Stringify(c)
+}
+
+// CommitsListOptions specifies the optional parameters to the
+// RepositoriesService.ListCommits method.
+type CommitsListOptions struct {
+ // SHA or branch to start listing Commits from.
+ SHA string `url:"sha,omitempty"`
+
+ // Path that should be touched by the returned Commits.
+ Path string `url:"path,omitempty"`
+
+ // Author by which to filter Commits.
+ Author string `url:"author,omitempty"`
+
+ // Since when should Commits be included in the response.
+ Since time.Time `url:"since,omitempty"`
+
+ // Until when should Commits be included in the response.
+ Until time.Time `url:"until,omitempty"`
+
+ ListOptions
+}
+
+// ListCommits lists the commits of a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/commits/#list
+func (s *RepositoriesService) ListCommits(owner, repo string, opt *CommitsListOptions) ([]*RepositoryCommit, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/commits", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ commits := new([]*RepositoryCommit)
+ resp, err := s.client.Do(req, commits)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *commits, resp, err
+}
+
+// GetCommit fetches the specified commit, including all details about it.
+// todo: support media formats - https://github.com/google/go-github/issues/6
+//
+// GitHub API docs: http://developer.github.com/v3/repos/commits/#get-a-single-commit
+// See also: http://developer.github.com//v3/git/commits/#get-a-single-commit provides the same functionality
+func (s *RepositoriesService) GetCommit(owner, repo, sha string) (*RepositoryCommit, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, sha)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeGitSigningPreview)
+
+ commit := new(RepositoryCommit)
+ resp, err := s.client.Do(req, commit)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return commit, resp, err
+}
+
+// GetCommitSHA1 gets the SHA-1 of a commit reference. If a last-known SHA1 is
+// supplied and no new commits have occurred, a 304 Unmodified response is returned.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference
+func (s *RepositoriesService) GetCommitSHA1(owner, repo, ref, lastSHA string) (string, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, ref)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return "", nil, err
+ }
+ if lastSHA != "" {
+ req.Header.Set("If-None-Match", `"`+lastSHA+`"`)
+ }
+
+ req.Header.Set("Accept", mediaTypeV3SHA)
+
+ var buf bytes.Buffer
+ resp, err := s.client.Do(req, &buf)
+ if err != nil {
+ return "", resp, err
+ }
+
+ return buf.String(), resp, err
+}
+
+// CompareCommits compares a range of commits with each other.
+// todo: support media formats - https://github.com/google/go-github/issues/6
+//
+// GitHub API docs: http://developer.github.com/v3/repos/commits/index.html#compare-two-commits
+func (s *RepositoriesService) CompareCommits(owner, repo string, base, head string) (*CommitsComparison, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/compare/%v...%v", owner, repo, base, head)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ comp := new(CommitsComparison)
+ resp, err := s.client.Do(req, comp)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return comp, resp, err
+}
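A brief sketch of how ListCommits and GetCommit fit together (illustrative only; owner, repo, branch, and path are placeholders). Note that Stats and Files are populated only by GetCommit, as the field comments above state:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil)

	// List commits on a branch, restricted to a path and the last month.
	opt := &github.CommitsListOptions{
		SHA:         "master",
		Path:        "README.md",
		Since:       time.Now().AddDate(0, -1, 0),
		ListOptions: github.ListOptions{PerPage: 30},
	}
	commits, _, err := client.Repositories.ListCommits("octocat", "Hello-World", opt)
	if err != nil {
		log.Fatal(err)
	}
	if len(commits) == 0 {
		return
	}

	// GetCommit fills in Stats and Files, which ListCommits does not.
	commit, _, err := client.Repositories.GetCommit("octocat", "Hello-World", *commits[0].SHA)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: +%d -%d\n", *commit.SHA, *commit.Stats.Additions, *commit.Stats.Deletions)
}
```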
diff --git a/vendor/github.com/google/go-github/github/repos_contents.go b/vendor/github.com/google/go-github/github/repos_contents.go
new file mode 100644
index 0000000..7b08cf0
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_contents.go
@@ -0,0 +1,279 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Repository contents API methods.
+// http://developer.github.com/v3/repos/contents/
+
+package github
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+)
+
+// RepositoryContent represents a file or directory in a github repository.
+type RepositoryContent struct {
+ Type *string `json:"type,omitempty"`
+ Encoding *string `json:"encoding,omitempty"`
+ Size *int `json:"size,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Path *string `json:"path,omitempty"`
+ // Content contains the actual file content, which may be encoded.
+ // Callers should call GetContent which will decode the content if
+ // necessary.
+ Content *string `json:"content,omitempty"`
+ SHA *string `json:"sha,omitempty"`
+ URL *string `json:"url,omitempty"`
+ GitURL *string `json:"git_url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ DownloadURL *string `json:"download_url,omitempty"`
+}
+
+// RepositoryContentResponse holds the parsed response from CreateFile, UpdateFile, and DeleteFile.
+type RepositoryContentResponse struct {
+ Content *RepositoryContent `json:"content,omitempty"`
+ Commit `json:"commit,omitempty"`
+}
+
+// RepositoryContentFileOptions specifies optional parameters for CreateFile, UpdateFile, and DeleteFile.
+type RepositoryContentFileOptions struct {
+ Message *string `json:"message,omitempty"`
+ Content []byte `json:"content,omitempty"` // unencoded
+ SHA *string `json:"sha,omitempty"`
+ Branch *string `json:"branch,omitempty"`
+ Author *CommitAuthor `json:"author,omitempty"`
+ Committer *CommitAuthor `json:"committer,omitempty"`
+}
+
+// RepositoryContentGetOptions represents an optional ref parameter, which can be a SHA,
+// branch, or tag
+type RepositoryContentGetOptions struct {
+ Ref string `url:"ref,omitempty"`
+}
+
+// String converts RepositoryContent to a string. It's primarily for testing.
+func (r RepositoryContent) String() string {
+ return Stringify(r)
+}
+
+// Decode decodes the file content if it is base64 encoded.
+//
+// Deprecated: Use GetContent instead.
+func (r *RepositoryContent) Decode() ([]byte, error) {
+ if *r.Encoding != "base64" {
+ return nil, errors.New("cannot decode non-base64")
+ }
+ o, err := base64.StdEncoding.DecodeString(*r.Content)
+ if err != nil {
+ return nil, err
+ }
+ return o, nil
+}
+
+// GetContent returns the content of r, decoding it if necessary.
+func (r *RepositoryContent) GetContent() (string, error) {
+ var encoding string
+ if r.Encoding != nil {
+ encoding = *r.Encoding
+ }
+
+ switch encoding {
+ case "base64":
+ c, err := base64.StdEncoding.DecodeString(*r.Content)
+ return string(c), err
+ case "":
+ if r.Content == nil {
+ return "", nil
+ }
+ return *r.Content, nil
+ default:
+ return "", fmt.Errorf("unsupported content encoding: %v", encoding)
+ }
+}
+
+// GetReadme gets the Readme file for the repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/contents/#get-the-readme
+func (s *RepositoriesService) GetReadme(owner, repo string, opt *RepositoryContentGetOptions) (*RepositoryContent, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/readme", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ readme := new(RepositoryContent)
+ resp, err := s.client.Do(req, readme)
+ if err != nil {
+ return nil, resp, err
+ }
+ return readme, resp, err
+}
+
+// DownloadContents returns an io.ReadCloser that reads the contents of the
+// specified file. This function will work with files of any size, as opposed
+// to GetContents, which is limited to 1 MB files. It is the caller's
+// responsibility to close the ReadCloser.
+func (s *RepositoriesService) DownloadContents(owner, repo, filepath string, opt *RepositoryContentGetOptions) (io.ReadCloser, error) {
+ dir := path.Dir(filepath)
+ filename := path.Base(filepath)
+ _, dirContents, _, err := s.GetContents(owner, repo, dir, opt)
+ if err != nil {
+ return nil, err
+ }
+ for _, contents := range dirContents {
+ if *contents.Name == filename {
+ if contents.DownloadURL == nil || *contents.DownloadURL == "" {
+ return nil, fmt.Errorf("No download link found for %s", filepath)
+ }
+ resp, err := s.client.client.Get(*contents.DownloadURL)
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+ }
+ }
+ return nil, fmt.Errorf("No file named %s found in %s", filename, dir)
+}
+
+// GetContents can return either the metadata and content of a single file
+// (when path references a file) or the metadata of all the files and/or
+// subdirectories of a directory (when path references a directory). To make it
+// easy to distinguish between both result types and to mimic the API as much
+// as possible, both result types will be returned but only one will contain a
+// value and the other will be nil.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/contents/#get-contents
+func (s *RepositoriesService) GetContents(owner, repo, path string, opt *RepositoryContentGetOptions) (fileContent *RepositoryContent, directoryContent []*RepositoryContent, resp *Response, err error) {
+ escapedPath := (&url.URL{Path: path}).String()
+ u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, escapedPath)
+ u, err = addOptions(u, opt)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ var rawJSON json.RawMessage
+ resp, err = s.client.Do(req, &rawJSON)
+ if err != nil {
+ return nil, nil, resp, err
+ }
+ fileUnmarshalError := json.Unmarshal(rawJSON, &fileContent)
+ if fileUnmarshalError == nil {
+ return fileContent, nil, resp, fileUnmarshalError
+ }
+ directoryUnmarshalError := json.Unmarshal(rawJSON, &directoryContent)
+ if directoryUnmarshalError == nil {
+ return nil, directoryContent, resp, directoryUnmarshalError
+ }
+ return nil, nil, resp, fmt.Errorf("unmarshalling failed for both file and directory content: %s and %s ", fileUnmarshalError, directoryUnmarshalError)
+}
+
+// CreateFile creates a new file in a repository at the given path and returns
+// the commit and file metadata.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/contents/#create-a-file
+func (s *RepositoriesService) CreateFile(owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path)
+ req, err := s.client.NewRequest("PUT", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+ createResponse := new(RepositoryContentResponse)
+ resp, err := s.client.Do(req, createResponse)
+ if err != nil {
+ return nil, resp, err
+ }
+ return createResponse, resp, err
+}
+
+// UpdateFile updates a file in a repository at the given path and returns the
+// commit and file metadata. Requires the blob SHA of the file being updated.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/contents/#update-a-file
+func (s *RepositoriesService) UpdateFile(owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path)
+ req, err := s.client.NewRequest("PUT", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+ updateResponse := new(RepositoryContentResponse)
+ resp, err := s.client.Do(req, updateResponse)
+ if err != nil {
+ return nil, resp, err
+ }
+ return updateResponse, resp, err
+}
+
+// DeleteFile deletes a file from a repository and returns the commit.
+// Requires the blob SHA of the file to be deleted.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/contents/#delete-a-file
+func (s *RepositoriesService) DeleteFile(owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path)
+ req, err := s.client.NewRequest("DELETE", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+ deleteResponse := new(RepositoryContentResponse)
+ resp, err := s.client.Do(req, deleteResponse)
+ if err != nil {
+ return nil, resp, err
+ }
+ return deleteResponse, resp, err
+}
+
+// archiveFormat is used to define the archive type when calling GetArchiveLink.
+type archiveFormat string
+
+const (
+ // Tarball specifies an archive in gzipped tar format.
+ Tarball archiveFormat = "tarball"
+
+ // Zipball specifies an archive in zip format.
+ Zipball archiveFormat = "zipball"
+)
+
+// GetArchiveLink returns a URL to download a tarball or zipball archive for a
+// repository. The archiveFormat can be specified by either the github.Tarball
+// or github.Zipball constant.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/contents/#get-archive-link
+func (s *RepositoriesService) GetArchiveLink(owner, repo string, archiveformat archiveFormat, opt *RepositoryContentGetOptions) (*url.URL, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/%s", owner, repo, archiveformat)
+ if opt != nil && opt.Ref != "" {
+ u += fmt.Sprintf("/%s", opt.Ref)
+ }
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ var resp *http.Response
+ // Use http.DefaultTransport if no custom Transport is configured
+ if s.client.client.Transport == nil {
+ resp, err = http.DefaultTransport.RoundTrip(req)
+ } else {
+ resp, err = s.client.client.Transport.RoundTrip(req)
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ resp.Body.Close()
+ if resp.StatusCode != http.StatusFound {
+ return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
+ }
+ parsedURL, err := url.Parse(resp.Header.Get("Location"))
+ return parsedURL, newResponse(resp), err
+}
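To illustrate the file-or-directory split that GetContents documents above, here is a small sketch (placeholder owner/repo/path; not part of the vendored file). Exactly one of the two returned values is non-nil, and GetContent handles base64 decoding for file results:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil)
	opt := &github.RepositoryContentGetOptions{Ref: "master"}

	file, dir, _, err := client.Repositories.GetContents("octocat", "Hello-World", "README", opt)
	if err != nil {
		log.Fatal(err)
	}

	if file != nil {
		// Path referenced a single file: decode and print its content.
		text, err := file.GetContent()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(text)
		return
	}

	// Path referenced a directory: print its entries.
	for _, entry := range dir {
		fmt.Println(*entry.Type, *entry.Path)
	}
}
```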
diff --git a/vendor/github.com/google/go-github/github/repos_deployments.go b/vendor/github.com/google/go-github/github/repos_deployments.go
new file mode 100644
index 0000000..cf24520
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_deployments.go
@@ -0,0 +1,222 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// Deployment represents a deployment in a repo
+type Deployment struct {
+ URL *string `json:"url,omitempty"`
+ ID *int `json:"id,omitempty"`
+ SHA *string `json:"sha,omitempty"`
+ Ref *string `json:"ref,omitempty"`
+ Task *string `json:"task,omitempty"`
+ Payload json.RawMessage `json:"payload,omitempty"`
+ Environment *string `json:"environment,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Creator *User `json:"creator,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+ StatusesURL *string `json:"statuses_url,omitempty"`
+ RepositoryURL *string `json:"repository_url,omitempty"`
+}
+
+// DeploymentRequest represents a deployment request
+type DeploymentRequest struct {
+ Ref *string `json:"ref,omitempty"`
+ Task *string `json:"task,omitempty"`
+ AutoMerge *bool `json:"auto_merge,omitempty"`
+ RequiredContexts *[]string `json:"required_contexts,omitempty"`
+ Payload *string `json:"payload,omitempty"`
+ Environment *string `json:"environment,omitempty"`
+ Description *string `json:"description,omitempty"`
+ TransientEnvironment *bool `json:"transient_environment,omitempty"`
+ ProductionEnvironment *bool `json:"production_environment,omitempty"`
+}
+
+// DeploymentsListOptions specifies the optional parameters to the
+// RepositoriesService.ListDeployments method.
+type DeploymentsListOptions struct {
+ // SHA of the Deployment.
+ SHA string `url:"sha,omitempty"`
+
+ // List deployments for a given ref.
+ Ref string `url:"ref,omitempty"`
+
+ // List deployments for a given task.
+ Task string `url:"task,omitempty"`
+
+ // List deployments for a given environment.
+ Environment string `url:"environment,omitempty"`
+
+ ListOptions
+}
+
+// ListDeployments lists the deployments of a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/deployments/#list-deployments
+func (s *RepositoriesService) ListDeployments(owner, repo string, opt *DeploymentsListOptions) ([]*Deployment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ deployments := new([]*Deployment)
+ resp, err := s.client.Do(req, deployments)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *deployments, resp, err
+}
+
+// GetDeployment returns a single deployment of a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/deployments/#get-a-single-deployment
+func (s *RepositoriesService) GetDeployment(owner, repo string, deploymentID int) (*Deployment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/deployments/%v", owner, repo, deploymentID)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ deployment := new(Deployment)
+ resp, err := s.client.Do(req, deployment)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return deployment, resp, err
+}
+
+// CreateDeployment creates a new deployment for a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/deployments/#create-a-deployment
+func (s *RepositoriesService) CreateDeployment(owner, repo string, request *DeploymentRequest) (*Deployment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo)
+
+ req, err := s.client.NewRequest("POST", u, request)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when deployment support fully launches
+ req.Header.Set("Accept", mediaTypeDeploymentStatusPreview)
+
+ d := new(Deployment)
+ resp, err := s.client.Do(req, d)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return d, resp, err
+}
+
+// DeploymentStatus represents the status of a
+// particular deployment.
+type DeploymentStatus struct {
+ ID *int `json:"id,omitempty"`
+ // State is the deployment state.
+ // Possible values are: "pending", "success", "failure", "error", "inactive".
+ State *string `json:"state,omitempty"`
+ Creator *User `json:"creator,omitempty"`
+ Description *string `json:"description,omitempty"`
+ TargetURL *string `json:"target_url,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+ DeploymentURL *string `json:"deployment_url,omitempty"`
+ RepositoryURL *string `json:"repository_url,omitempty"`
+}
+
+// DeploymentStatusRequest represents a deployment status request
+type DeploymentStatusRequest struct {
+ State *string `json:"state,omitempty"`
+ TargetURL *string `json:"target_url,omitempty"` // Deprecated. Use LogURL instead.
+ LogURL *string `json:"log_url,omitempty"`
+ Description *string `json:"description,omitempty"`
+ EnvironmentURL *string `json:"environment_url,omitempty"`
+ AutoInactive *bool `json:"auto_inactive,omitempty"`
+}
+
+// ListDeploymentStatuses lists the statuses of a given deployment of a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/deployments/#list-deployment-statuses
+func (s *RepositoriesService) ListDeploymentStatuses(owner, repo string, deployment int, opt *ListOptions) ([]*DeploymentStatus, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ statuses := new([]*DeploymentStatus)
+ resp, err := s.client.Do(req, statuses)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *statuses, resp, err
+}
+
+// GetDeploymentStatus returns a single deployment status of a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/deployments/#get-a-single-deployment-status
+func (s *RepositoriesService) GetDeploymentStatus(owner, repo string, deploymentID, deploymentStatusID int) (*DeploymentStatus, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses/%v", owner, repo, deploymentID, deploymentStatusID)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when deployment support fully launches
+ req.Header.Set("Accept", mediaTypeDeploymentStatusPreview)
+
+ d := new(DeploymentStatus)
+ resp, err := s.client.Do(req, d)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return d, resp, err
+}
+
+// CreateDeploymentStatus creates a new status for a deployment.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/deployments/#create-a-deployment-status
+func (s *RepositoriesService) CreateDeploymentStatus(owner, repo string, deployment int, request *DeploymentStatusRequest) (*DeploymentStatus, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment)
+
+ req, err := s.client.NewRequest("POST", u, request)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when deployment support fully launches
+ req.Header.Set("Accept", mediaTypeDeploymentStatusPreview)
+
+ d := new(DeploymentStatus)
+ resp, err := s.client.Do(req, d)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return d, resp, err
+}
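A hedged sketch of the typical create-then-report flow for the deployments API above; owner, repo, ref, and environment are placeholders, and an authenticated client is required:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // substitute an authenticated *http.Client

	// Record a deployment of a ref to an environment.
	dep, _, err := client.Repositories.CreateDeployment("octocat", "Hello-World", &github.DeploymentRequest{
		Ref:         github.String("master"),
		Environment: github.String("production"),
		Description: github.String("deploying via go-github"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Report the outcome as a deployment status.
	status, _, err := client.Repositories.CreateDeploymentStatus("octocat", "Hello-World", *dep.ID, &github.DeploymentStatusRequest{
		State:       github.String("success"),
		Description: github.String("deployed without incident"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deployment", *dep.ID, "is", *status.State)
}
```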
diff --git a/vendor/github.com/google/go-github/github/repos_forks.go b/vendor/github.com/google/go-github/github/repos_forks.go
new file mode 100644
index 0000000..c88f3d3
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_forks.go
@@ -0,0 +1,79 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// RepositoryListForksOptions specifies the optional parameters to the
+// RepositoriesService.ListForks method.
+type RepositoryListForksOptions struct {
+ // How to sort the forks list. Possible values are: newest, oldest,
+ // watchers. Default is "newest".
+ Sort string `url:"sort,omitempty"`
+
+ ListOptions
+}
+
+// ListForks lists the forks of the specified repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/forks/#list-forks
+func (s *RepositoriesService) ListForks(owner, repo string, opt *RepositoryListForksOptions) ([]*Repository, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/forks", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ repos := new([]*Repository)
+ resp, err := s.client.Do(req, repos)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *repos, resp, err
+}
+
+// RepositoryCreateForkOptions specifies the optional parameters to the
+// RepositoriesService.CreateFork method.
+type RepositoryCreateForkOptions struct {
+ // The organization to fork the repository into.
+ Organization string `url:"organization,omitempty"`
+}
+
+// CreateFork creates a fork of the specified repository.
+//
+// This method might return an *AcceptedError and a status code of 202,
+// which is the status GitHub returns to signify that it is creating the
+// fork in a background task. A follow-up request, after a delay of a
+// second or so, should result in a successful request.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/forks/#create-a-fork
+func (s *RepositoriesService) CreateFork(owner, repo string, opt *RepositoryCreateForkOptions) (*Repository, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/forks", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("POST", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ fork := new(Repository)
+ resp, err := s.client.Do(req, fork)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return fork, resp, err
+}
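Because CreateFork can return *AcceptedError while GitHub builds the fork asynchronously (as the comment above notes), callers typically special-case that error. A minimal sketch with placeholder names:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // an authenticated client is required to fork

	fork, _, err := client.Repositories.CreateFork("octocat", "Hello-World", nil)
	if _, ok := err.(*github.AcceptedError); ok {
		// 202 Accepted: the fork is being created in a background task.
		fmt.Println("fork queued; retry shortly to fetch the new repository")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("forked to", *fork.FullName)
}
```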
diff --git a/vendor/github.com/google/go-github/github/repos_hooks.go b/vendor/github.com/google/go-github/github/repos_hooks.go
new file mode 100644
index 0000000..fe725b4
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_hooks.go
@@ -0,0 +1,196 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// WebHookPayload represents the data that is received from GitHub when a push
+// event hook is triggered. The format of these payloads pre-date most of the
+// GitHub v3 API, so there are lots of minor incompatibilities with the types
+// defined in the rest of the API. Therefore, several types are duplicated
+// here to account for these differences.
+//
+// GitHub API docs: https://help.github.com/articles/post-receive-hooks
+type WebHookPayload struct {
+ After *string `json:"after,omitempty"`
+ Before *string `json:"before,omitempty"`
+ Commits []WebHookCommit `json:"commits,omitempty"`
+ Compare *string `json:"compare,omitempty"`
+ Created *bool `json:"created,omitempty"`
+ Deleted *bool `json:"deleted,omitempty"`
+ Forced *bool `json:"forced,omitempty"`
+ HeadCommit *WebHookCommit `json:"head_commit,omitempty"`
+ Pusher *User `json:"pusher,omitempty"`
+ Ref *string `json:"ref,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+}
+
+func (w WebHookPayload) String() string {
+ return Stringify(w)
+}
+
+// WebHookCommit represents the commit variant we receive from GitHub in a
+// WebHookPayload.
+type WebHookCommit struct {
+ Added []string `json:"added,omitempty"`
+ Author *WebHookAuthor `json:"author,omitempty"`
+ Committer *WebHookAuthor `json:"committer,omitempty"`
+ Distinct *bool `json:"distinct,omitempty"`
+ ID *string `json:"id,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Modified []string `json:"modified,omitempty"`
+ Removed []string `json:"removed,omitempty"`
+ Timestamp *time.Time `json:"timestamp,omitempty"`
+}
+
+func (w WebHookCommit) String() string {
+ return Stringify(w)
+}
+
+// WebHookAuthor represents the author or committer of a commit, as specified
+// in a WebHookCommit. The commit author may not correspond to a GitHub User.
+type WebHookAuthor struct {
+ Email *string `json:"email,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Username *string `json:"username,omitempty"`
+}
+
+func (w WebHookAuthor) String() string {
+ return Stringify(w)
+}
+
+// Hook represents a GitHub (web and service) hook for a repository.
+type Hook struct {
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+ Name *string `json:"name,omitempty"`
+ URL *string `json:"url,omitempty"`
+ Events []string `json:"events,omitempty"`
+ Active *bool `json:"active,omitempty"`
+ Config map[string]interface{} `json:"config,omitempty"`
+ ID *int `json:"id,omitempty"`
+}
+
+func (h Hook) String() string {
+ return Stringify(h)
+}
+
+// CreateHook creates a Hook for the specified repository.
+// Name and Config are required fields.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/hooks/#create-a-hook
+func (s *RepositoriesService) CreateHook(owner, repo string, hook *Hook) (*Hook, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo)
+ req, err := s.client.NewRequest("POST", u, hook)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ h := new(Hook)
+ resp, err := s.client.Do(req, h)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return h, resp, err
+}
+
+// ListHooks lists all Hooks for the specified repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/hooks/#list
+func (s *RepositoriesService) ListHooks(owner, repo string, opt *ListOptions) ([]*Hook, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ hooks := new([]*Hook)
+ resp, err := s.client.Do(req, hooks)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *hooks, resp, err
+}
+
+// GetHook returns a single specified Hook.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/hooks/#get-single-hook
+func (s *RepositoriesService) GetHook(owner, repo string, id int) (*Hook, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ hook := new(Hook)
+ resp, err := s.client.Do(req, hook)
+ return hook, resp, err
+}
+
+// EditHook updates a specified Hook.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/hooks/#edit-a-hook
+func (s *RepositoriesService) EditHook(owner, repo string, id int, hook *Hook) (*Hook, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id)
+ req, err := s.client.NewRequest("PATCH", u, hook)
+ if err != nil {
+ return nil, nil, err
+ }
+ h := new(Hook)
+ resp, err := s.client.Do(req, h)
+ return h, resp, err
+}
+
+// DeleteHook deletes a specified Hook.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/hooks/#delete-a-hook
+func (s *RepositoriesService) DeleteHook(owner, repo string, id int) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
+
+// PingHook triggers a 'ping' event to be sent to the Hook.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/hooks/#ping-a-hook
+func (s *RepositoriesService) PingHook(owner, repo string, id int) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/hooks/%d/pings", owner, repo, id)
+ req, err := s.client.NewRequest("POST", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
+
+// TestHook triggers a test 'push' event to be sent to the Hook.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/hooks/#test-a-push-hook
+func (s *RepositoriesService) TestHook(owner, repo string, id int) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/hooks/%d/tests", owner, repo, id)
+ req, err := s.client.NewRequest("POST", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
+
+// ListServiceHooks is deprecated. Use Client.ListServiceHooks instead.
+func (s *RepositoriesService) ListServiceHooks() ([]*ServiceHook, *Response, error) {
+ return s.client.ListServiceHooks()
+}
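A sketch of creating a webhook and pinging it with the methods above; the endpoint URL and owner/repo are placeholders, and the Config keys follow GitHub's hook configuration convention (url, content_type):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // needs admin rights on the repository

	hook := &github.Hook{
		Name:   github.String("web"),
		Active: github.Bool(true),
		Events: []string{"push", "pull_request"},
		Config: map[string]interface{}{
			"url":          "https://example.com/webhook",
			"content_type": "json",
		},
	}
	created, _, err := client.Repositories.CreateHook("octocat", "Hello-World", hook)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("hook id", *created.ID)

	// Ask GitHub to deliver a ping event so the endpoint can be verified.
	if _, err := client.Repositories.PingHook("octocat", "Hello-World", *created.ID); err != nil {
		log.Fatal(err)
	}
}
```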
diff --git a/vendor/github.com/google/go-github/github/repos_invitations.go b/vendor/github.com/google/go-github/github/repos_invitations.go
new file mode 100644
index 0000000..f2806d1
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_invitations.go
@@ -0,0 +1,91 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// RepositoryInvitation represents an invitation to collaborate on a repo.
+type RepositoryInvitation struct {
+ ID *int `json:"id,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Invitee *User `json:"invitee,omitempty"`
+ Inviter *User `json:"inviter,omitempty"`
+
+ // Permissions represents the permissions that the associated user will have
+ // on the repository. Possible values are: "read", "write", "admin".
+ Permissions *string `json:"permissions,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+}
+
+// ListInvitations lists all currently-open repository invitations.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/invitations/#list-invitations-for-a-repository
+func (s *RepositoriesService) ListInvitations(repoID int, opt *ListOptions) ([]*RepositoryInvitation, *Response, error) {
+ u := fmt.Sprintf("repositories/%v/invitations", repoID)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
+
+ invites := []*RepositoryInvitation{}
+ resp, err := s.client.Do(req, &invites)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return invites, resp, err
+}
+
+// DeleteInvitation deletes a repository invitation.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/invitations/#delete-a-repository-invitation
+func (s *RepositoriesService) DeleteInvitation(repoID, invitationID int) (*Response, error) {
+ u := fmt.Sprintf("repositories/%v/invitations/%v", repoID, invitationID)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
+
+ return s.client.Do(req, nil)
+}
+
+// UpdateInvitation updates the permissions associated with a repository
+// invitation.
+//
+// permissions represents the permissions that the associated user will have
+// on the repository. Possible values are: "read", "write", "admin".
+//
+// GitHub API docs: https://developer.github.com/v3/repos/invitations/#update-a-repository-invitation
+func (s *RepositoriesService) UpdateInvitation(repoID, invitationID int, permissions string) (*RepositoryInvitation, *Response, error) {
+ opts := &struct {
+ Permissions string `json:"permissions"`
+ }{Permissions: permissions}
+ u := fmt.Sprintf("repositories/%v/invitations/%v", repoID, invitationID)
+ req, err := s.client.NewRequest("PATCH", u, opts)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
+
+ invite := &RepositoryInvitation{}
+ resp, err := s.client.Do(req, invite)
+ return invite, resp, err
+}
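Unlike most repository methods, the invitations API above is addressed by numeric repository ID rather than owner/name. A sketch (placeholder ID, authenticated client assumed) that upgrades every pending invitee to write access:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // substitute an authenticated *http.Client

	repoID := 1296269 // placeholder numeric repository ID
	invites, _, err := client.Repositories.ListInvitations(repoID, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, inv := range invites {
		updated, _, err := client.Repositories.UpdateInvitation(repoID, *inv.ID, "write")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(*updated.Invitee.Login, "now has", *updated.Permissions, "access")
	}
}
```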
diff --git a/vendor/github.com/google/go-github/github/repos_keys.go b/vendor/github.com/google/go-github/github/repos_keys.go
new file mode 100644
index 0000000..0bb404a
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_keys.go
@@ -0,0 +1,108 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// The Key type is defined in users_keys.go
+
+// ListKeys lists the deploy keys for a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/keys/#list
+func (s *RepositoriesService) ListKeys(owner string, repo string, opt *ListOptions) ([]*Key, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/keys", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ keys := new([]*Key)
+ resp, err := s.client.Do(req, keys)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *keys, resp, err
+}
+
+// GetKey fetches a single deploy key.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/keys/#get
+func (s *RepositoriesService) GetKey(owner string, repo string, id int) (*Key, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ key := new(Key)
+ resp, err := s.client.Do(req, key)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return key, resp, err
+}
+
+// CreateKey adds a deploy key for a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/keys/#create
+func (s *RepositoriesService) CreateKey(owner string, repo string, key *Key) (*Key, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/keys", owner, repo)
+
+ req, err := s.client.NewRequest("POST", u, key)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ k := new(Key)
+ resp, err := s.client.Do(req, k)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return k, resp, err
+}
+
+// EditKey edits a deploy key.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/keys/#edit
+func (s *RepositoriesService) EditKey(owner string, repo string, id int, key *Key) (*Key, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id)
+
+ req, err := s.client.NewRequest("PATCH", u, key)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ k := new(Key)
+ resp, err := s.client.Do(req, k)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return k, resp, err
+}
+
+// DeleteKey deletes a deploy key.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/keys/#delete
+func (s *RepositoriesService) DeleteKey(owner string, repo string, id int) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id)
+
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/google/go-github/github/repos_merging.go b/vendor/github.com/google/go-github/github/repos_merging.go
new file mode 100644
index 0000000..31f8313
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_merging.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+)
+
+// RepositoryMergeRequest represents a request to merge a branch in a
+// repository.
+type RepositoryMergeRequest struct {
+ Base *string `json:"base,omitempty"`
+ Head *string `json:"head,omitempty"`
+ CommitMessage *string `json:"commit_message,omitempty"`
+}
+
+// Merge a branch in the specified repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/merging/#perform-a-merge
+func (s *RepositoriesService) Merge(owner, repo string, request *RepositoryMergeRequest) (*RepositoryCommit, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/merges", owner, repo)
+ req, err := s.client.NewRequest("POST", u, request)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ commit := new(RepositoryCommit)
+ resp, err := s.client.Do(req, commit)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return commit, resp, err
+}
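A minimal sketch of the merge endpoint above, assuming placeholder branch names and an authenticated client with push access:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // substitute an authenticated *http.Client

	req := &github.RepositoryMergeRequest{
		Base:          github.String("master"),
		Head:          github.String("feature-branch"),
		CommitMessage: github.String("Merge feature-branch into master"),
	}
	commit, _, err := client.Repositories.Merge("octocat", "Hello-World", req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("merge commit", *commit.SHA)
}
```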
diff --git a/vendor/github.com/google/go-github/github/repos_pages.go b/vendor/github.com/google/go-github/github/repos_pages.go
new file mode 100644
index 0000000..ddd8301
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_pages.go
@@ -0,0 +1,135 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// Pages represents a GitHub Pages site configuration.
+type Pages struct {
+ URL *string `json:"url,omitempty"`
+ Status *string `json:"status,omitempty"`
+ CNAME *string `json:"cname,omitempty"`
+ Custom404 *bool `json:"custom_404,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+}
+
+// PagesError represents a build error for a GitHub Pages site.
+type PagesError struct {
+ Message *string `json:"message,omitempty"`
+}
+
+// PagesBuild represents the build information for a GitHub Pages site.
+type PagesBuild struct {
+ URL *string `json:"url,omitempty"`
+ Status *string `json:"status,omitempty"`
+ Error *PagesError `json:"error,omitempty"`
+ Pusher *User `json:"pusher,omitempty"`
+ Commit *string `json:"commit,omitempty"`
+ Duration *int `json:"duration,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+}
+
+// GetPagesInfo fetches information about a GitHub Pages site.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/pages/#get-information-about-a-pages-site
+func (s *RepositoriesService) GetPagesInfo(owner, repo string) (*Pages, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pages", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypePagesPreview)
+
+ site := new(Pages)
+ resp, err := s.client.Do(req, site)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return site, resp, err
+}
+
+// ListPagesBuilds lists the builds for a GitHub Pages site.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-pages-builds
+func (s *RepositoriesService) ListPagesBuilds(owner, repo string) ([]*PagesBuild, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var pages []*PagesBuild
+ resp, err := s.client.Do(req, &pages)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return pages, resp, err
+}
+
+// GetLatestPagesBuild fetches the latest build information for a GitHub pages site.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-latest-pages-build
+func (s *RepositoriesService) GetLatestPagesBuild(owner, repo string) (*PagesBuild, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pages/builds/latest", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ build := new(PagesBuild)
+ resp, err := s.client.Do(req, build)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return build, resp, err
+}
+
+// GetPageBuild fetches the specific build information for a GitHub pages site.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-a-specific-pages-build
+func (s *RepositoriesService) GetPageBuild(owner, repo string, id int) (*PagesBuild, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pages/builds/%v", owner, repo, id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ build := new(PagesBuild)
+ resp, err := s.client.Do(req, build)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return build, resp, err
+}
+
+// RequestPageBuild requests a build of a GitHub Pages site without needing to push a new commit.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/pages/#request-a-page-build
+func (s *RepositoriesService) RequestPageBuild(owner, repo string) (*PagesBuild, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo)
+ req, err := s.client.NewRequest("POST", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypePagesPreview)
+
+ build := new(PagesBuild)
+ resp, err := s.client.Do(req, build)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return build, resp, err
+}
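A short sketch combining GetPagesInfo and RequestPageBuild from above; the owner and Pages repository name are placeholders and an authenticated client is assumed:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // substitute an authenticated *http.Client

	info, _, err := client.Repositories.GetPagesInfo("octocat", "octocat.github.io")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pages status:", *info.Status)

	// Queue a fresh build without pushing a new commit.
	build, _, err := client.Repositories.RequestPageBuild("octocat", "octocat.github.io")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("build status:", *build.Status)
}
```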
diff --git a/vendor/github.com/google/go-github/github/repos_projects.go b/vendor/github.com/google/go-github/github/repos_projects.go
new file mode 100644
index 0000000..137f89d
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_projects.go
@@ -0,0 +1,57 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// ListProjects lists the projects for a repo.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/#list-repository-projects
+func (s *RepositoriesService) ListProjects(owner, repo string, opt *ListOptions) ([]*Project, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/projects", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ projects := []*Project{}
+ resp, err := s.client.Do(req, &projects)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return projects, resp, err
+}
+
+// CreateProject creates a GitHub Project for the specified repository.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/#create-a-repository-project
+func (s *RepositoriesService) CreateProject(owner, repo string, opt *ProjectOptions) (*Project, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/projects", owner, repo)
+ req, err := s.client.NewRequest("POST", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ project := &Project{}
+ resp, err := s.client.Do(req, project)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return project, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/repos_releases.go b/vendor/github.com/google/go-github/github/repos_releases.go
new file mode 100644
index 0000000..331a4b7
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_releases.go
@@ -0,0 +1,325 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "mime"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// RepositoryRelease represents a GitHub release in a repository.
+type RepositoryRelease struct {
+ ID *int `json:"id,omitempty"`
+ TagName *string `json:"tag_name,omitempty"`
+ TargetCommitish *string `json:"target_commitish,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Body *string `json:"body,omitempty"`
+ Draft *bool `json:"draft,omitempty"`
+ Prerelease *bool `json:"prerelease,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ PublishedAt *Timestamp `json:"published_at,omitempty"`
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ AssetsURL *string `json:"assets_url,omitempty"`
+ Assets []ReleaseAsset `json:"assets,omitempty"`
+ UploadURL *string `json:"upload_url,omitempty"`
+ ZipballURL *string `json:"zipball_url,omitempty"`
+ TarballURL *string `json:"tarball_url,omitempty"`
+ Author *User `json:"author,omitempty"`
+}
+
+func (r RepositoryRelease) String() string {
+ return Stringify(r)
+}
+
+// ReleaseAsset represents a GitHub release asset in a repository.
+type ReleaseAsset struct {
+ ID *int `json:"id,omitempty"`
+ URL *string `json:"url,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Label *string `json:"label,omitempty"`
+ State *string `json:"state,omitempty"`
+ ContentType *string `json:"content_type,omitempty"`
+ Size *int `json:"size,omitempty"`
+ DownloadCount *int `json:"download_count,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+ BrowserDownloadURL *string `json:"browser_download_url,omitempty"`
+ Uploader *User `json:"uploader,omitempty"`
+}
+
+func (r ReleaseAsset) String() string {
+ return Stringify(r)
+}
+
+// ListReleases lists the releases for a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/releases/#list-releases-for-a-repository
+func (s *RepositoriesService) ListReleases(owner, repo string, opt *ListOptions) ([]*RepositoryRelease, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/releases", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ releases := new([]*RepositoryRelease)
+ resp, err := s.client.Do(req, releases)
+ if err != nil {
+ return nil, resp, err
+ }
+ return *releases, resp, err
+}
+
+// GetRelease fetches a single release.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/releases/#get-a-single-release
+func (s *RepositoriesService) GetRelease(owner, repo string, id int) (*RepositoryRelease, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id)
+ return s.getSingleRelease(u)
+}
+
+// GetLatestRelease fetches the latest published release for the repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-the-latest-release
+func (s *RepositoriesService) GetLatestRelease(owner, repo string) (*RepositoryRelease, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/releases/latest", owner, repo)
+ return s.getSingleRelease(u)
+}
+
+// GetReleaseByTag fetches a release with the specified tag.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-a-release-by-tag-name
+func (s *RepositoriesService) GetReleaseByTag(owner, repo, tag string) (*RepositoryRelease, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/releases/tags/%s", owner, repo, tag)
+ return s.getSingleRelease(u)
+}
+
+func (s *RepositoriesService) getSingleRelease(url string) (*RepositoryRelease, *Response, error) {
+ req, err := s.client.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ release := new(RepositoryRelease)
+ resp, err := s.client.Do(req, release)
+ if err != nil {
+ return nil, resp, err
+ }
+ return release, resp, err
+}
+
+// CreateRelease adds a new release for a repository.
+//
+// GitHub API docs : http://developer.github.com/v3/repos/releases/#create-a-release
+func (s *RepositoriesService) CreateRelease(owner, repo string, release *RepositoryRelease) (*RepositoryRelease, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/releases", owner, repo)
+
+ req, err := s.client.NewRequest("POST", u, release)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ r := new(RepositoryRelease)
+ resp, err := s.client.Do(req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+ return r, resp, err
+}
+
+// EditRelease edits a repository release.
+//
+// GitHub API docs : http://developer.github.com/v3/repos/releases/#edit-a-release
+func (s *RepositoriesService) EditRelease(owner, repo string, id int, release *RepositoryRelease) (*RepositoryRelease, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id)
+
+ req, err := s.client.NewRequest("PATCH", u, release)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ r := new(RepositoryRelease)
+ resp, err := s.client.Do(req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+ return r, resp, err
+}
+
+// DeleteRelease delete a single release from a repository.
+//
+// GitHub API docs : http://developer.github.com/v3/repos/releases/#delete-a-release
+func (s *RepositoriesService) DeleteRelease(owner, repo string, id int) (*Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id)
+
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
+
+// ListReleaseAssets lists the release's assets.
+//
+// GitHub API docs : http://developer.github.com/v3/repos/releases/#list-assets-for-a-release
+func (s *RepositoriesService) ListReleaseAssets(owner, repo string, id int, opt *ListOptions) ([]*ReleaseAsset, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ assets := new([]*ReleaseAsset)
+ resp, err := s.client.Do(req, assets)
+ if err != nil {
+ return nil, resp, err
+ }
+ return *assets, resp, err
+}
+
+// GetReleaseAsset fetches a single release asset.
+//
+// GitHub API docs : http://developer.github.com/v3/repos/releases/#get-a-single-release-asset
+func (s *RepositoriesService) GetReleaseAsset(owner, repo string, id int) (*ReleaseAsset, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ asset := new(ReleaseAsset)
+ resp, err := s.client.Do(req, asset)
+ if err != nil {
+ return nil, resp, err
+ }
+ return asset, resp, err
+}
+
+// DownloadReleaseAsset downloads a release asset or returns a redirect URL.
+//
+// DownloadReleaseAsset returns an io.ReadCloser that reads the contents of the
+// specified release asset. It is the caller's responsibility to close the ReadCloser.
+// If a redirect is returned, the redirect URL will be returned as a string instead
+// of the io.ReadCloser. Exactly one of rc and redirectURL will be zero.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/releases/#get-a-single-release-asset
+func (s *RepositoriesService) DownloadReleaseAsset(owner, repo string, id int) (rc io.ReadCloser, redirectURL string, err error) {
+ u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ req.Header.Set("Accept", defaultMediaType)
+
+ s.client.clientMu.Lock()
+ defer s.client.clientMu.Unlock()
+
+ var loc string
+ saveRedirect := s.client.client.CheckRedirect
+ s.client.client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ loc = req.URL.String()
+ return errors.New("disable redirect")
+ }
+ defer func() { s.client.client.CheckRedirect = saveRedirect }()
+
+ resp, err := s.client.client.Do(req)
+ if err != nil {
+ if !strings.Contains(err.Error(), "disable redirect") {
+ return nil, "", err
+ }
+ return nil, loc, nil
+ }
+
+ if err := CheckResponse(resp); err != nil {
+ resp.Body.Close()
+ return nil, "", err
+ }
+
+ return resp.Body, "", nil
+}
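+// A minimal usage sketch for DownloadReleaseAsset, assuming a client created with
+// github.NewClient; the owner, repo, and asset ID are hypothetical. The caller must
+// handle both return modes: a ReadCloser it owns, or a redirect URL to follow itself.
+//
+//	client := github.NewClient(nil)
+//	rc, redirectURL, err := client.Repositories.DownloadReleaseAsset("octocat", "hello-world", 1)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if rc != nil {
+//		defer rc.Close()           // caller is responsible for closing
+//		io.Copy(os.Stdout, rc)
+//	} else {
+//		resp, err := http.Get(redirectURL) // follow the redirect manually
+//		if err == nil {
+//			defer resp.Body.Close()
+//			io.Copy(os.Stdout, resp.Body)
+//		}
+//	}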
+
+// EditReleaseAsset edits a repository release asset.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/releases/#edit-a-release-asset
+func (s *RepositoriesService) EditReleaseAsset(owner, repo string, id int, release *ReleaseAsset) (*ReleaseAsset, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
+
+ req, err := s.client.NewRequest("PATCH", u, release)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ asset := new(ReleaseAsset)
+ resp, err := s.client.Do(req, asset)
+ if err != nil {
+ return nil, resp, err
+ }
+ return asset, resp, err
+}
+
+// DeleteReleaseAsset deletes a single release asset from a repository.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/releases/#delete-a-release-asset
+func (s *RepositoriesService) DeleteReleaseAsset(owner, repo string, id int) (*Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
+
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+ return s.client.Do(req, nil)
+}
+
+// UploadReleaseAsset creates an asset by uploading a file to a repository release.
+// To upload assets that cannot be represented by an os.File, call NewUploadRequest directly.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/releases/#upload-a-release-asset
+func (s *RepositoriesService) UploadReleaseAsset(owner, repo string, id int, opt *UploadOptions, file *os.File) (*ReleaseAsset, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ stat, err := file.Stat()
+ if err != nil {
+ return nil, nil, err
+ }
+ if stat.IsDir() {
+ return nil, nil, errors.New("the asset to upload can't be a directory")
+ }
+
+ mediaType := mime.TypeByExtension(filepath.Ext(file.Name()))
+ req, err := s.client.NewUploadRequest(u, file, stat.Size(), mediaType)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ asset := new(ReleaseAsset)
+ resp, err := s.client.Do(req, asset)
+ if err != nil {
+ return nil, resp, err
+ }
+ return asset, resp, err
+}
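+// A minimal usage sketch for UploadReleaseAsset, assuming a client created with
+// github.NewClient; the file path, release ID, and UploadOptions name are hypothetical.
+//
+//	f, err := os.Open("dist/app-linux-amd64.tar.gz")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer f.Close()
+//	asset, _, err := client.Repositories.UploadReleaseAsset("octocat", "hello-world",
+//		releaseID, &github.UploadOptions{Name: "app-linux-amd64.tar.gz"}, f)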
diff --git a/vendor/github.com/google/go-github/github/repos_stats.go b/vendor/github.com/google/go-github/github/repos_stats.go
new file mode 100644
index 0000000..8657bd7
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_stats.go
@@ -0,0 +1,225 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// ContributorStats represents a contributor to a repository and their
+// weekly contributions to a given repo.
+type ContributorStats struct {
+ Author *Contributor `json:"author,omitempty"`
+ Total *int `json:"total,omitempty"`
+ Weeks []WeeklyStats `json:"weeks,omitempty"`
+}
+
+func (c ContributorStats) String() string {
+ return Stringify(c)
+}
+
+// WeeklyStats represents the number of additions, deletions and commits
+// a Contributor made in a given week.
+type WeeklyStats struct {
+ Week *Timestamp `json:"w,omitempty"`
+ Additions *int `json:"a,omitempty"`
+ Deletions *int `json:"d,omitempty"`
+ Commits *int `json:"c,omitempty"`
+}
+
+func (w WeeklyStats) String() string {
+ return Stringify(w)
+}
+
+// ListContributorsStats gets a repo's contributor list with additions,
+// deletions and commit counts.
+//
+// If this is the first time these statistics are requested for the given
+// repository, this method will return an *AcceptedError and a status code of
+// 202. This is because this is the status that GitHub returns to signify that
+// it is now computing the requested statistics. A follow up request, after a
+// delay of a second or so, should result in a successful request.
+//
+// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#contributors
+func (s *RepositoriesService) ListContributorsStats(owner, repo string) ([]*ContributorStats, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/stats/contributors", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var contributorStats []*ContributorStats
+ resp, err := s.client.Do(req, &contributorStats)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return contributorStats, resp, err
+}
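+// A minimal usage sketch for the 202 case described above, assuming a client created
+// with github.NewClient; the owner/repo values and retry delay are hypothetical. On a
+// first request GitHub may return an *AcceptedError while it computes the statistics.
+//
+//	stats, _, err := client.Repositories.ListContributorsStats("octocat", "hello-world")
+//	if _, ok := err.(*github.AcceptedError); ok {
+//		time.Sleep(2 * time.Second) // GitHub is still computing the statistics
+//		stats, _, err = client.Repositories.ListContributorsStats("octocat", "hello-world")
+//	}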
+
+// WeeklyCommitActivity represents the weekly commit activity for a repository.
+// The days array is a group of commits per day, starting on Sunday.
+type WeeklyCommitActivity struct {
+ Days []int `json:"days,omitempty"`
+ Total *int `json:"total,omitempty"`
+ Week *Timestamp `json:"week,omitempty"`
+}
+
+func (w WeeklyCommitActivity) String() string {
+ return Stringify(w)
+}
+
+// ListCommitActivity returns the last year of commit activity
+// grouped by week. The days array is a group of commits per day,
+// starting on Sunday.
+//
+// If this is the first time these statistics are requested for the given
+// repository, this method will return an *AcceptedError and a status code of
+// 202. This is because this is the status that GitHub returns to signify that
+// it is now computing the requested statistics. A follow up request, after a
+// delay of a second or so, should result in a successful request.
+//
+// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#commit-activity
+func (s *RepositoriesService) ListCommitActivity(owner, repo string) ([]*WeeklyCommitActivity, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/stats/commit_activity", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var weeklyCommitActivity []*WeeklyCommitActivity
+ resp, err := s.client.Do(req, &weeklyCommitActivity)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return weeklyCommitActivity, resp, err
+}
+
+// ListCodeFrequency returns a weekly aggregate of the number of additions and
+// deletions pushed to a repository. Returned WeeklyStats will contain
+// additions and deletions, but not total commits.
+//
+// If this is the first time these statistics are requested for the given
+// repository, this method will return an *AcceptedError and a status code of
+// 202. This is because this is the status that GitHub returns to signify that
+// it is now computing the requested statistics. A follow up request, after a
+// delay of a second or so, should result in a successful request.
+//
+// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#code-frequency
+func (s *RepositoriesService) ListCodeFrequency(owner, repo string) ([]*WeeklyStats, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/stats/code_frequency", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var weeks [][]int
+ resp, err := s.client.Do(req, &weeks)
+
+ // convert int slices into WeeklyStats
+ var stats []*WeeklyStats
+ for _, week := range weeks {
+ if len(week) != 3 {
+ continue
+ }
+ stat := &WeeklyStats{
+ Week: &Timestamp{time.Unix(int64(week[0]), 0)},
+ Additions: Int(week[1]),
+ Deletions: Int(week[2]),
+ }
+ stats = append(stats, stat)
+ }
+
+ return stats, resp, err
+}
+
+// RepositoryParticipation is the number of commits by everyone
+// who has contributed to the repository (including the owner)
+// as well as the number of commits by the owner themself.
+type RepositoryParticipation struct {
+ All []int `json:"all,omitempty"`
+ Owner []int `json:"owner,omitempty"`
+}
+
+func (r RepositoryParticipation) String() string {
+ return Stringify(r)
+}
+
+// ListParticipation returns the total commit counts for the 'owner'
+// and total commit counts in 'all'. 'all' is everyone combined,
+// including the 'owner' in the last 52 weeks. If you’d like to get
+// the commit counts for non-owners, you can subtract 'owner' from 'all'.
+//
+// The array order is oldest week (index 0) to most recent week.
+//
+// If this is the first time these statistics are requested for the given
+// repository, this method will return an *AcceptedError and a status code of
+// 202. This is because this is the status that GitHub returns to signify that
+// it is now computing the requested statistics. A follow up request, after a
+// delay of a second or so, should result in a successful request.
+//
+// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#participation
+func (s *RepositoriesService) ListParticipation(owner, repo string) (*RepositoryParticipation, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/stats/participation", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ participation := new(RepositoryParticipation)
+ resp, err := s.client.Do(req, participation)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return participation, resp, err
+}
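+// A minimal usage sketch deriving per-week non-owner commit counts from a
+// RepositoryParticipation value, as described above; owner/repo are hypothetical.
+//
+//	p, _, err := client.Repositories.ListParticipation("octocat", "hello-world")
+//	if err == nil {
+//		for i := range p.All {
+//			nonOwner := p.All[i] - p.Owner[i] // index 0 is the oldest week
+//			fmt.Println(nonOwner)
+//		}
+//	}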
+
+// PunchCard represents the number of commits made during a given hour of a
+// day of the week.
+type PunchCard struct {
+ Day *int // Day of the week (0-6: Sunday to Saturday).
+ Hour *int // Hour of day (0-23).
+ Commits *int // Number of commits.
+}
+
+// ListPunchCard returns the number of commits per hour in each day.
+//
+// If this is the first time these statistics are requested for the given
+// repository, this method will return an *AcceptedError and a status code of
+// 202. This is because this is the status that GitHub returns to signify that
+// it is now computing the requested statistics. A follow up request, after a
+// delay of a second or so, should result in a successful request.
+//
+// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#punch-card
+func (s *RepositoriesService) ListPunchCard(owner, repo string) ([]*PunchCard, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/stats/punch_card", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var results [][]int
+ resp, err := s.client.Do(req, &results)
+
+ // convert int slices into Punchcards
+ var cards []*PunchCard
+ for _, result := range results {
+ if len(result) != 3 {
+ continue
+ }
+ card := &PunchCard{
+ Day: Int(result[0]),
+ Hour: Int(result[1]),
+ Commits: Int(result[2]),
+ }
+ cards = append(cards, card)
+ }
+
+ return cards, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/repos_statuses.go b/vendor/github.com/google/go-github/github/repos_statuses.go
new file mode 100644
index 0000000..6478ee2
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_statuses.go
@@ -0,0 +1,128 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// RepoStatus represents the status of a repository at a particular reference.
+type RepoStatus struct {
+ ID *int `json:"id,omitempty"`
+ URL *string `json:"url,omitempty"`
+
+ // State is the current state of the repository. Possible values are:
+ // pending, success, error, or failure.
+ State *string `json:"state,omitempty"`
+
+ // TargetURL is the URL of the page representing this status. It will be
+ // linked from the GitHub UI to allow users to see the source of the status.
+ TargetURL *string `json:"target_url,omitempty"`
+
+ // Description is a short high level summary of the status.
+ Description *string `json:"description,omitempty"`
+
+ // A string label to differentiate this status from the statuses of other systems.
+ Context *string `json:"context,omitempty"`
+
+ Creator *User `json:"creator,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+}
+
+func (r RepoStatus) String() string {
+ return Stringify(r)
+}
+
+// ListStatuses lists the statuses of a repository at the specified
+// reference. ref can be a SHA, a branch name, or a tag name.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/statuses/#list-statuses-for-a-specific-ref
+func (s *RepositoriesService) ListStatuses(owner, repo, ref string, opt *ListOptions) ([]*RepoStatus, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/commits/%v/statuses", owner, repo, ref)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ statuses := new([]*RepoStatus)
+ resp, err := s.client.Do(req, statuses)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *statuses, resp, err
+}
+
+// CreateStatus creates a new status for a repository at the specified
+// reference. Ref can be a SHA, a branch name, or a tag name.
+//
+// GitHub API docs: http://developer.github.com/v3/repos/statuses/#create-a-status
+func (s *RepositoriesService) CreateStatus(owner, repo, ref string, status *RepoStatus) (*RepoStatus, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/statuses/%v", owner, repo, ref)
+ req, err := s.client.NewRequest("POST", u, status)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ repoStatus := new(RepoStatus)
+ resp, err := s.client.Do(req, repoStatus)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return repoStatus, resp, err
+}
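+// A minimal usage sketch for CreateStatus, assuming a client created with
+// github.NewClient; the commit SHA, context, and target URL are hypothetical.
+// The github.String helper builds the *string fields.
+//
+//	status := &github.RepoStatus{
+//		State:       github.String("success"),
+//		Context:     github.String("ci/build"),
+//		Description: github.String("Build passed"),
+//		TargetURL:   github.String("https://ci.example.com/build/42"),
+//	}
+//	_, _, err := client.Repositories.CreateStatus("octocat", "hello-world", "d6fde92930d4715a2b49857d24b940956b26d2d3", status)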
+
+// CombinedStatus represents the combined status of a repository at a particular reference.
+type CombinedStatus struct {
+ // State is the combined state of the repository. Possible values are:
+ // failure, pending, or success.
+ State *string `json:"state,omitempty"`
+
+ Name *string `json:"name,omitempty"`
+ SHA *string `json:"sha,omitempty"`
+ TotalCount *int `json:"total_count,omitempty"`
+ Statuses []RepoStatus `json:"statuses,omitempty"`
+
+ CommitURL *string `json:"commit_url,omitempty"`
+ RepositoryURL *string `json:"repository_url,omitempty"`
+}
+
+func (s CombinedStatus) String() string {
+ return Stringify(s)
+}
+
+// GetCombinedStatus returns the combined status of a repository at the specified
+// reference. ref can be a SHA, a branch name, or a tag name.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/statuses/#get-the-combined-status-for-a-specific-ref
+func (s *RepositoriesService) GetCombinedStatus(owner, repo, ref string, opt *ListOptions) (*CombinedStatus, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/commits/%v/status", owner, repo, ref)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ status := new(CombinedStatus)
+ resp, err := s.client.Do(req, status)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return status, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/repos_traffic.go b/vendor/github.com/google/go-github/github/repos_traffic.go
new file mode 100644
index 0000000..b328ffa
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_traffic.go
@@ -0,0 +1,138 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// TrafficReferrer represents information about traffic from a referrer.
+type TrafficReferrer struct {
+ Referrer *string `json:"referrer,omitempty"`
+ Count *int `json:"count,omitempty"`
+ Uniques *int `json:"uniques,omitempty"`
+}
+
+// TrafficPath represents information about the traffic on a path of the repo.
+type TrafficPath struct {
+ Path *string `json:"path,omitempty"`
+ Title *string `json:"title,omitempty"`
+ Count *int `json:"count,omitempty"`
+ Uniques *int `json:"uniques,omitempty"`
+}
+
+// TrafficData represents information about a specific timestamp in a views or clones list.
+type TrafficData struct {
+ Timestamp *Timestamp `json:"timestamp,omitempty"`
+ Count *int `json:"count,omitempty"`
+ Uniques *int `json:"uniques,omitempty"`
+}
+
+// TrafficViews represents information about the number of views in the last 14 days.
+type TrafficViews struct {
+ Views []*TrafficData `json:"views,omitempty"`
+ Count *int `json:"count,omitempty"`
+ Uniques *int `json:"uniques,omitempty"`
+}
+
+// TrafficClones represents information about the number of clones in the last 14 days.
+type TrafficClones struct {
+ Clones []*TrafficData `json:"clones,omitempty"`
+ Count *int `json:"count,omitempty"`
+ Uniques *int `json:"uniques,omitempty"`
+}
+
+// TrafficBreakdownOptions specifies the parameters to methods that support breakdown per day or week.
+// Can be one of: day, week. Default: day.
+type TrafficBreakdownOptions struct {
+ Per string `url:"per,omitempty"`
+}
+
+// ListTrafficReferrers lists the top 10 referrers over the last 14 days.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/traffic/#list-referrers
+func (s *RepositoriesService) ListTrafficReferrers(owner, repo string) ([]*TrafficReferrer, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/traffic/popular/referrers", owner, repo)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ trafficReferrers := new([]*TrafficReferrer)
+ resp, err := s.client.Do(req, &trafficReferrers)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *trafficReferrers, resp, err
+}
+
+// ListTrafficPaths lists the top 10 popular content paths over the last 14 days.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/traffic/#list-paths
+func (s *RepositoriesService) ListTrafficPaths(owner, repo string) ([]*TrafficPath, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/traffic/popular/paths", owner, repo)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var paths = new([]*TrafficPath)
+ resp, err := s.client.Do(req, &paths)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *paths, resp, err
+}
+
+// ListTrafficViews gets the total number of views for the last 14 days, broken down per day or per week.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/traffic/#views
+func (s *RepositoriesService) ListTrafficViews(owner, repo string, opt *TrafficBreakdownOptions) (*TrafficViews, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/traffic/views", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ trafficViews := new(TrafficViews)
+ resp, err := s.client.Do(req, &trafficViews)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return trafficViews, resp, err
+}
+
+// ListTrafficClones gets the total number of clones for the last 14 days, broken down per day or per week.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/traffic/#clones
+func (s *RepositoriesService) ListTrafficClones(owner, repo string, opt *TrafficBreakdownOptions) (*TrafficClones, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/traffic/clones", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ trafficClones := new(TrafficClones)
+ resp, err := s.client.Do(req, &trafficClones)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return trafficClones, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/search.go b/vendor/github.com/google/go-github/github/search.go
new file mode 100644
index 0000000..579a57d
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/search.go
@@ -0,0 +1,160 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+
+ qs "github.com/google/go-querystring/query"
+)
+
+// SearchService provides access to the search related functions
+// in the GitHub API.
+//
+// GitHub API docs: http://developer.github.com/v3/search/
+type SearchService service
+
+// SearchOptions specifies optional parameters to the SearchService methods.
+type SearchOptions struct {
+ // How to sort the search results. Possible values are:
+ // - for repositories: stars, fork, updated
+ // - for code: indexed
+ // - for issues: comments, created, updated
+ // - for users: followers, repositories, joined
+ //
+ // Default is to sort by best match.
+ Sort string `url:"sort,omitempty"`
+
+ // Sort order if sort parameter is provided. Possible values are: asc,
+ // desc. Default is desc.
+ Order string `url:"order,omitempty"`
+
+ // Whether to retrieve text match metadata with a query
+ TextMatch bool `url:"-"`
+
+ ListOptions
+}
+
+// RepositoriesSearchResult represents the result of a repositories search.
+type RepositoriesSearchResult struct {
+ Total *int `json:"total_count,omitempty"`
+ IncompleteResults *bool `json:"incomplete_results,omitempty"`
+ Repositories []Repository `json:"items,omitempty"`
+}
+
+// Repositories searches repositories via various criteria.
+//
+// GitHub API docs: http://developer.github.com/v3/search/#search-repositories
+func (s *SearchService) Repositories(query string, opt *SearchOptions) (*RepositoriesSearchResult, *Response, error) {
+ result := new(RepositoriesSearchResult)
+ resp, err := s.search("repositories", query, opt, result)
+ return result, resp, err
+}
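+// A minimal usage sketch for a repository search, assuming a client created with
+// github.NewClient; the query string and paging values are hypothetical.
+//
+//	opts := &github.SearchOptions{
+//		Sort:        "stars",
+//		Order:       "desc",
+//		ListOptions: github.ListOptions{PerPage: 50},
+//	}
+//	result, _, err := client.Search.Repositories("http server language:go", opts)
+//	if err == nil && result.Total != nil {
+//		fmt.Println("total:", *result.Total)
+//	}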
+
+// IssuesSearchResult represents the result of an issues search.
+type IssuesSearchResult struct {
+ Total *int `json:"total_count,omitempty"`
+ IncompleteResults *bool `json:"incomplete_results,omitempty"`
+ Issues []Issue `json:"items,omitempty"`
+}
+
+// Issues searches issues via various criteria.
+//
+// GitHub API docs: http://developer.github.com/v3/search/#search-issues
+func (s *SearchService) Issues(query string, opt *SearchOptions) (*IssuesSearchResult, *Response, error) {
+ result := new(IssuesSearchResult)
+ resp, err := s.search("issues", query, opt, result)
+ return result, resp, err
+}
+
+// UsersSearchResult represents the result of a users search.
+type UsersSearchResult struct {
+ Total *int `json:"total_count,omitempty"`
+ IncompleteResults *bool `json:"incomplete_results,omitempty"`
+ Users []User `json:"items,omitempty"`
+}
+
+// Users searches users via various criteria.
+//
+// GitHub API docs: http://developer.github.com/v3/search/#search-users
+func (s *SearchService) Users(query string, opt *SearchOptions) (*UsersSearchResult, *Response, error) {
+ result := new(UsersSearchResult)
+ resp, err := s.search("users", query, opt, result)
+ return result, resp, err
+}
+
+// Match represents a single text match.
+type Match struct {
+ Text *string `json:"text,omitempty"`
+ Indices []int `json:"indices,omitempty"`
+}
+
+// TextMatch represents a text match for a SearchResult
+type TextMatch struct {
+ ObjectURL *string `json:"object_url,omitempty"`
+ ObjectType *string `json:"object_type,omitempty"`
+ Property *string `json:"property,omitempty"`
+ Fragment *string `json:"fragment,omitempty"`
+ Matches []Match `json:"matches,omitempty"`
+}
+
+func (tm TextMatch) String() string {
+ return Stringify(tm)
+}
+
+// CodeSearchResult represents the result of a code search.
+type CodeSearchResult struct {
+ Total *int `json:"total_count,omitempty"`
+ IncompleteResults *bool `json:"incomplete_results,omitempty"`
+ CodeResults []CodeResult `json:"items,omitempty"`
+}
+
+// CodeResult represents a single search result.
+type CodeResult struct {
+ Name *string `json:"name,omitempty"`
+ Path *string `json:"path,omitempty"`
+ SHA *string `json:"sha,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ Repository *Repository `json:"repository,omitempty"`
+ TextMatches []TextMatch `json:"text_matches,omitempty"`
+}
+
+func (c CodeResult) String() string {
+ return Stringify(c)
+}
+
+// Code searches code via various criteria.
+//
+// GitHub API docs: http://developer.github.com/v3/search/#search-code
+func (s *SearchService) Code(query string, opt *SearchOptions) (*CodeSearchResult, *Response, error) {
+ result := new(CodeSearchResult)
+ resp, err := s.search("code", query, opt, result)
+ return result, resp, err
+}
+
+// Helper function that executes search queries against different
+// GitHub search types (repositories, code, issues, users)
+func (s *SearchService) search(searchType string, query string, opt *SearchOptions, result interface{}) (*Response, error) {
+ params, err := qs.Values(opt)
+ if err != nil {
+ return nil, err
+ }
+ params.Add("q", query)
+ u := fmt.Sprintf("search/%s?%s", searchType, params.Encode())
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ if opt != nil && opt.TextMatch {
+ // Accept header defaults to "application/vnd.github.v3+json"
+ // We change it here to fetch back text-match metadata
+ req.Header.Set("Accept", "application/vnd.github.v3.text-match+json")
+ }
+
+ return s.client.Do(req, result)
+}
diff --git a/vendor/github.com/google/go-github/github/strings.go b/vendor/github.com/google/go-github/github/strings.go
new file mode 100644
index 0000000..3857723
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/strings.go
@@ -0,0 +1,93 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "reflect"
+)
+
+var timestampType = reflect.TypeOf(Timestamp{})
+
+// Stringify attempts to create a reasonable string representation of types in
+// the GitHub library. It does things like resolve pointers to their values
+// and omits struct fields with nil values.
+func Stringify(message interface{}) string {
+ var buf bytes.Buffer
+ v := reflect.ValueOf(message)
+ stringifyValue(&buf, v)
+ return buf.String()
+}
+
+// stringifyValue was heavily inspired by the goprotobuf library.
+
+func stringifyValue(w io.Writer, val reflect.Value) {
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ w.Write([]byte(""))
+ return
+ }
+
+ v := reflect.Indirect(val)
+
+ switch v.Kind() {
+ case reflect.String:
+ fmt.Fprintf(w, `"%s"`, v)
+ case reflect.Slice:
+ w.Write([]byte{'['})
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ w.Write([]byte{' '})
+ }
+
+ stringifyValue(w, v.Index(i))
+ }
+
+ w.Write([]byte{']'})
+ return
+ case reflect.Struct:
+ if v.Type().Name() != "" {
+ w.Write([]byte(v.Type().String()))
+ }
+
+ // special handling of Timestamp values
+ if v.Type() == timestampType {
+ fmt.Fprintf(w, "{%s}", v.Interface())
+ return
+ }
+
+ w.Write([]byte{'{'})
+
+ var sep bool
+ for i := 0; i < v.NumField(); i++ {
+ fv := v.Field(i)
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ continue
+ }
+
+ if sep {
+ w.Write([]byte(", "))
+ } else {
+ sep = true
+ }
+
+ w.Write([]byte(v.Type().Field(i).Name))
+ w.Write([]byte{':'})
+ stringifyValue(w, fv)
+ }
+
+ w.Write([]byte{'}'})
+ default:
+ if v.CanInterface() {
+ fmt.Fprint(w, v.Interface())
+ }
+ }
+}
diff --git a/vendor/github.com/google/go-github/github/timestamp.go b/vendor/github.com/google/go-github/github/timestamp.go
new file mode 100644
index 0000000..a1c1554
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/timestamp.go
@@ -0,0 +1,41 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "strconv"
+ "time"
+)
+
+// Timestamp represents a time that can be unmarshalled from a JSON string
+// formatted as either an RFC3339 or Unix timestamp. This is necessary for some
+// fields since the GitHub API is inconsistent in how it represents times. All
+// exported methods of time.Time can be called on Timestamp.
+type Timestamp struct {
+ time.Time
+}
+
+func (t Timestamp) String() string {
+ return t.Time.String()
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+// Time is expected in RFC3339 or Unix format.
+func (t *Timestamp) UnmarshalJSON(data []byte) (err error) {
+ str := string(data)
+ i, err := strconv.ParseInt(str, 10, 64)
+ if err == nil {
+ (*t).Time = time.Unix(i, 0)
+ } else {
+ (*t).Time, err = time.Parse(`"`+time.RFC3339+`"`, str)
+ }
+ return
+}
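+// A minimal sketch of the two encodings described above; both decode to the same
+// instant (the literal values are illustrative only).
+//
+//	var a, b Timestamp
+//	json.Unmarshal([]byte(`1257894000`), &a)             // Unix seconds
+//	json.Unmarshal([]byte(`"2009-11-10T23:00:00Z"`), &b) // RFC3339
+//	fmt.Println(a.Equal(b)) // true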
+
+// Equal reports whether t and u are equal based on time.Equal
+func (t Timestamp) Equal(u Timestamp) bool {
+ return t.Time.Equal(u.Time)
+}
diff --git a/vendor/github.com/google/go-github/github/users.go b/vendor/github.com/google/go-github/github/users.go
new file mode 100644
index 0000000..8f63746
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/users.go
@@ -0,0 +1,222 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// UsersService handles communication with the user related
+// methods of the GitHub API.
+//
+// GitHub API docs: http://developer.github.com/v3/users/
+type UsersService service
+
+// User represents a GitHub user.
+type User struct {
+ Login *string `json:"login,omitempty"`
+ ID *int `json:"id,omitempty"`
+ AvatarURL *string `json:"avatar_url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ GravatarID *string `json:"gravatar_id,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Company *string `json:"company,omitempty"`
+ Blog *string `json:"blog,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Email *string `json:"email,omitempty"`
+ Hireable *bool `json:"hireable,omitempty"`
+ Bio *string `json:"bio,omitempty"`
+ PublicRepos *int `json:"public_repos,omitempty"`
+ PublicGists *int `json:"public_gists,omitempty"`
+ Followers *int `json:"followers,omitempty"`
+ Following *int `json:"following,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+ SuspendedAt *Timestamp `json:"suspended_at,omitempty"`
+ Type *string `json:"type,omitempty"`
+ SiteAdmin *bool `json:"site_admin,omitempty"`
+ TotalPrivateRepos *int `json:"total_private_repos,omitempty"`
+ OwnedPrivateRepos *int `json:"owned_private_repos,omitempty"`
+ PrivateGists *int `json:"private_gists,omitempty"`
+ DiskUsage *int `json:"disk_usage,omitempty"`
+ Collaborators *int `json:"collaborators,omitempty"`
+ Plan *Plan `json:"plan,omitempty"`
+
+ // API URLs
+ URL *string `json:"url,omitempty"`
+ EventsURL *string `json:"events_url,omitempty"`
+ FollowingURL *string `json:"following_url,omitempty"`
+ FollowersURL *string `json:"followers_url,omitempty"`
+ GistsURL *string `json:"gists_url,omitempty"`
+ OrganizationsURL *string `json:"organizations_url,omitempty"`
+ ReceivedEventsURL *string `json:"received_events_url,omitempty"`
+ ReposURL *string `json:"repos_url,omitempty"`
+ StarredURL *string `json:"starred_url,omitempty"`
+ SubscriptionsURL *string `json:"subscriptions_url,omitempty"`
+
+ // TextMatches is only populated from search results that request text matches
+ // See: search.go and https://developer.github.com/v3/search/#text-match-metadata
+ TextMatches []TextMatch `json:"text_matches,omitempty"`
+
+ // Permissions identifies the permissions that a user has on a given
+ // repository. This is only populated when calling Repositories.ListCollaborators.
+ Permissions *map[string]bool `json:"permissions,omitempty"`
+}
+
+func (u User) String() string {
+ return Stringify(u)
+}
+
+// Get fetches a user. Passing the empty string will fetch the authenticated
+// user.
+//
+// GitHub API docs: http://developer.github.com/v3/users/#get-a-single-user
+func (s *UsersService) Get(user string) (*User, *Response, error) {
+ var u string
+ if user != "" {
+ u = fmt.Sprintf("users/%v", user)
+ } else {
+ u = "user"
+ }
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ uResp := new(User)
+ resp, err := s.client.Do(req, uResp)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return uResp, resp, err
+}
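+// A minimal usage sketch for Get, assuming an appropriately authenticated client;
+// the user name is hypothetical. Passing "" fetches the authenticated user.
+//
+//	me, _, err := client.Users.Get("") // authenticated user (requires auth)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	octocat, _, err := client.Users.Get("octocat")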
+
+// GetByID fetches a user.
+//
+// Note: GetByID uses the undocumented GitHub API endpoint /user/:id.
+func (s *UsersService) GetByID(id int) (*User, *Response, error) {
+ u := fmt.Sprintf("user/%d", id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ user := new(User)
+ resp, err := s.client.Do(req, user)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return user, resp, err
+}
+
+// Edit the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/users/#update-the-authenticated-user
+func (s *UsersService) Edit(user *User) (*User, *Response, error) {
+ u := "user"
+ req, err := s.client.NewRequest("PATCH", u, user)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ uResp := new(User)
+ resp, err := s.client.Do(req, uResp)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return uResp, resp, err
+}
+
+// UserListOptions specifies optional parameters to the UsersService.ListAll
+// method.
+type UserListOptions struct {
+ // ID of the last user seen
+ Since int `url:"since,omitempty"`
+
+ ListOptions
+}
+
+// ListAll lists all GitHub users.
+//
+// To paginate through all users, populate 'Since' with the ID of the last user.
+//
+// GitHub API docs: http://developer.github.com/v3/users/#get-all-users
+func (s *UsersService) ListAll(opt *UserListOptions) ([]*User, *Response, error) {
+ u, err := addOptions("users", opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ users := new([]*User)
+ resp, err := s.client.Do(req, users)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *users, resp, err
+}
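+// A minimal usage sketch for paginating ListAll by feeding the last seen ID back
+// into Since, as described above; the page size is hypothetical.
+//
+//	var allUsers []*github.User
+//	opt := &github.UserListOptions{ListOptions: github.ListOptions{PerPage: 100}}
+//	for {
+//		users, _, err := client.Users.ListAll(opt)
+//		if err != nil || len(users) == 0 {
+//			break
+//		}
+//		allUsers = append(allUsers, users...)
+//		opt.Since = *users[len(users)-1].ID // continue after the last user seen
+//	}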
+
+// ListInvitations lists all currently-open repository invitations for the
+// authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/invitations/#list-a-users-repository-invitations
+func (s *UsersService) ListInvitations() ([]*RepositoryInvitation, *Response, error) {
+ req, err := s.client.NewRequest("GET", "user/repository_invitations", nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
+
+ invites := []*RepositoryInvitation{}
+ resp, err := s.client.Do(req, &invites)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return invites, resp, err
+}
+
+// AcceptInvitation accepts the currently-open repository invitation for the
+// authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/invitations/#accept-a-repository-invitation
+func (s *UsersService) AcceptInvitation(invitationID int) (*Response, error) {
+ u := fmt.Sprintf("user/repository_invitations/%v", invitationID)
+ req, err := s.client.NewRequest("PATCH", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
+
+ return s.client.Do(req, nil)
+}
+
+// DeclineInvitation declines the currently-open repository invitation for the
+// authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/invitations/#decline-a-repository-invitation
+func (s *UsersService) DeclineInvitation(invitationID int) (*Response, error) {
+ u := fmt.Sprintf("user/repository_invitations/%v", invitationID)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
+
+ return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/google/go-github/github/users_administration.go b/vendor/github.com/google/go-github/github/users_administration.go
new file mode 100644
index 0000000..dc1dcb8
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/users_administration.go
@@ -0,0 +1,64 @@
+// Copyright 2014 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// PromoteSiteAdmin promotes a user to a site administrator of a GitHub Enterprise instance.
+//
+// GitHub API docs: https://developer.github.com/v3/users/administration/#promote-an-ordinary-user-to-a-site-administrator
+func (s *UsersService) PromoteSiteAdmin(user string) (*Response, error) {
+ u := fmt.Sprintf("users/%v/site_admin", user)
+
+ req, err := s.client.NewRequest("PUT", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// DemoteSiteAdmin demotes a user from site administrator of a GitHub Enterprise instance.
+//
+// GitHub API docs: https://developer.github.com/v3/users/administration/#demote-a-site-administrator-to-an-ordinary-user
+func (s *UsersService) DemoteSiteAdmin(user string) (*Response, error) {
+ u := fmt.Sprintf("users/%v/site_admin", user)
+
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// Suspend a user on a GitHub Enterprise instance.
+//
+// GitHub API docs: https://developer.github.com/v3/users/administration/#suspend-a-user
+func (s *UsersService) Suspend(user string) (*Response, error) {
+ u := fmt.Sprintf("users/%v/suspended", user)
+
+ req, err := s.client.NewRequest("PUT", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// Unsuspend a user on a GitHub Enterprise instance.
+//
+// GitHub API docs: https://developer.github.com/v3/users/administration/#unsuspend-a-user
+func (s *UsersService) Unsuspend(user string) (*Response, error) {
+ u := fmt.Sprintf("users/%v/suspended", user)
+
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/google/go-github/github/users_emails.go b/vendor/github.com/google/go-github/github/users_emails.go
new file mode 100644
index 0000000..e4a5898
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/users_emails.go
@@ -0,0 +1,69 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+// UserEmail represents a user's email address.
+type UserEmail struct {
+ Email *string `json:"email,omitempty"`
+ Primary *bool `json:"primary,omitempty"`
+ Verified *bool `json:"verified,omitempty"`
+}
+
+// ListEmails lists all email addresses for the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/users/emails/#list-email-addresses-for-a-user
+func (s *UsersService) ListEmails(opt *ListOptions) ([]*UserEmail, *Response, error) {
+ u := "user/emails"
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ emails := new([]*UserEmail)
+ resp, err := s.client.Do(req, emails)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *emails, resp, err
+}
+
+// AddEmails adds email addresses of the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/users/emails/#add-email-addresses
+func (s *UsersService) AddEmails(emails []string) ([]*UserEmail, *Response, error) {
+ u := "user/emails"
+ req, err := s.client.NewRequest("POST", u, emails)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ e := new([]*UserEmail)
+ resp, err := s.client.Do(req, e)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *e, resp, err
+}
+
+// DeleteEmails deletes email addresses from the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/users/emails/#delete-email-addresses
+func (s *UsersService) DeleteEmails(emails []string) (*Response, error) {
+ u := "user/emails"
+ req, err := s.client.NewRequest("DELETE", u, emails)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/google/go-github/github/users_followers.go b/vendor/github.com/google/go-github/github/users_followers.go
new file mode 100644
index 0000000..38a1662
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/users_followers.go
@@ -0,0 +1,116 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// ListFollowers lists the followers for a user. Passing the empty string will
+// fetch followers for the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/users/followers/#list-followers-of-a-user
+func (s *UsersService) ListFollowers(user string, opt *ListOptions) ([]*User, *Response, error) {
+ var u string
+ if user != "" {
+ u = fmt.Sprintf("users/%v/followers", user)
+ } else {
+ u = "user/followers"
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ users := new([]*User)
+ resp, err := s.client.Do(req, users)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *users, resp, err
+}
+
+// ListFollowing lists the people that a user is following. Passing the empty
+// string will list people the authenticated user is following.
+//
+// GitHub API docs: http://developer.github.com/v3/users/followers/#list-users-followed-by-another-user
+func (s *UsersService) ListFollowing(user string, opt *ListOptions) ([]*User, *Response, error) {
+ var u string
+ if user != "" {
+ u = fmt.Sprintf("users/%v/following", user)
+ } else {
+ u = "user/following"
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ users := new([]*User)
+ resp, err := s.client.Do(req, users)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *users, resp, err
+}
+
+// IsFollowing checks if "user" is following "target". Passing the empty
+// string for "user" will check if the authenticated user is following "target".
+//
+// GitHub API docs: http://developer.github.com/v3/users/followers/#check-if-you-are-following-a-user
+func (s *UsersService) IsFollowing(user, target string) (bool, *Response, error) {
+ var u string
+ if user != "" {
+ u = fmt.Sprintf("users/%v/following/%v", user, target)
+ } else {
+ u = fmt.Sprintf("user/following/%v", target)
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return false, nil, err
+ }
+
+ resp, err := s.client.Do(req, nil)
+ following, err := parseBoolResponse(err)
+ return following, resp, err
+}
+
+// Follow will cause the authenticated user to follow the specified user.
+//
+// GitHub API docs: http://developer.github.com/v3/users/followers/#follow-a-user
+func (s *UsersService) Follow(user string) (*Response, error) {
+ u := fmt.Sprintf("user/following/%v", user)
+ req, err := s.client.NewRequest("PUT", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// Unfollow will cause the authenticated user to unfollow the specified user.
+//
+// GitHub API docs: http://developer.github.com/v3/users/followers/#unfollow-a-user
+func (s *UsersService) Unfollow(user string) (*Response, error) {
+ u := fmt.Sprintf("user/following/%v", user)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/google/go-github/github/users_gpg_keys.go b/vendor/github.com/google/go-github/github/users_gpg_keys.go
new file mode 100644
index 0000000..08cfbed
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/users_gpg_keys.go
@@ -0,0 +1,127 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "fmt"
+ "time"
+)
+
+// GPGKey represents a GitHub user's public GPG key used to verify GPG signed commits and tags.
+//
+// https://developer.github.com/changes/2016-04-04-git-signing-api-preview/
+type GPGKey struct {
+ ID *int `json:"id,omitempty"`
+ PrimaryKeyID *int `json:"primary_key_id,omitempty"`
+ KeyID *string `json:"key_id,omitempty"`
+ PublicKey *string `json:"public_key,omitempty"`
+ Emails []GPGEmail `json:"emails,omitempty"`
+ Subkeys []GPGKey `json:"subkeys,omitempty"`
+ CanSign *bool `json:"can_sign,omitempty"`
+ CanEncryptComms *bool `json:"can_encrypt_comms,omitempty"`
+ CanEncryptStorage *bool `json:"can_encrypt_storage,omitempty"`
+ CanCertify *bool `json:"can_certify,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ ExpiresAt *time.Time `json:"expires_at,omitempty"`
+}
+
+// String stringifies a GPGKey.
+func (k GPGKey) String() string {
+ return Stringify(k)
+}
+
+// GPGEmail represents an email address associated to a GPG key.
+type GPGEmail struct {
+ Email *string `json:"email,omitempty"`
+ Verified *bool `json:"verified,omitempty"`
+}
+
+// ListGPGKeys lists the current user's GPG keys. It requires authentication
+// via Basic Auth or via OAuth with at least read:gpg_key scope.
+//
+// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#list-your-gpg-keys
+func (s *UsersService) ListGPGKeys() ([]*GPGKey, *Response, error) {
+ req, err := s.client.NewRequest("GET", "user/gpg_keys", nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeGitSigningPreview)
+
+ var keys []*GPGKey
+ resp, err := s.client.Do(req, &keys)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return keys, resp, err
+}
+
+// GetGPGKey gets extended details for a single GPG key. It requires authentication
+// via Basic Auth or via OAuth with at least read:gpg_key scope.
+//
+// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#get-a-single-gpg-key
+func (s *UsersService) GetGPGKey(id int) (*GPGKey, *Response, error) {
+ u := fmt.Sprintf("user/gpg_keys/%v", id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeGitSigningPreview)
+
+ key := &GPGKey{}
+ resp, err := s.client.Do(req, key)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return key, resp, err
+}
+
+// CreateGPGKey creates a GPG key. It requires authentication via Basic Auth
+// or OAuth with at least write:gpg_key scope.
+//
+// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#create-a-gpg-key
+func (s *UsersService) CreateGPGKey(armoredPublicKey string) (*GPGKey, *Response, error) {
+ gpgKey := &struct {
+ ArmoredPublicKey string `json:"armored_public_key"`
+ }{ArmoredPublicKey: armoredPublicKey}
+ req, err := s.client.NewRequest("POST", "user/gpg_keys", gpgKey)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeGitSigningPreview)
+
+ key := &GPGKey{}
+ resp, err := s.client.Do(req, key)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return key, resp, err
+}
+
+// DeleteGPGKey deletes a GPG key. It requires authentication via Basic Auth or
+// via OAuth with at least admin:gpg_key scope.
+//
+// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#delete-a-gpg-key
+func (s *UsersService) DeleteGPGKey(id int) (*Response, error) {
+ u := fmt.Sprintf("user/gpg_keys/%v", id)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeGitSigningPreview)
+
+ return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/google/go-github/github/users_keys.go b/vendor/github.com/google/go-github/github/users_keys.go
new file mode 100644
index 0000000..e4c255f
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/users_keys.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// Key represents a public SSH key used to authenticate a user or deploy script.
+type Key struct {
+ ID *int `json:"id,omitempty"`
+ Key *string `json:"key,omitempty"`
+ URL *string `json:"url,omitempty"`
+ Title *string `json:"title,omitempty"`
+ ReadOnly *bool `json:"read_only,omitempty"`
+}
+
+func (k Key) String() string {
+ return Stringify(k)
+}
+
+// ListKeys lists the verified public keys for a user. Passing the empty
+// string will fetch keys for the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/users/keys/#list-public-keys-for-a-user
+func (s *UsersService) ListKeys(user string, opt *ListOptions) ([]*Key, *Response, error) {
+ var u string
+ if user != "" {
+ u = fmt.Sprintf("users/%v/keys", user)
+ } else {
+ u = "user/keys"
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ keys := new([]*Key)
+ resp, err := s.client.Do(req, keys)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return *keys, resp, err
+}
+
+// GetKey fetches a single public key.
+//
+// GitHub API docs: http://developer.github.com/v3/users/keys/#get-a-single-public-key
+func (s *UsersService) GetKey(id int) (*Key, *Response, error) {
+ u := fmt.Sprintf("user/keys/%v", id)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ key := new(Key)
+ resp, err := s.client.Do(req, key)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return key, resp, err
+}
+
+// CreateKey adds a public key for the authenticated user.
+//
+// GitHub API docs: http://developer.github.com/v3/users/keys/#create-a-public-key
+func (s *UsersService) CreateKey(key *Key) (*Key, *Response, error) {
+ u := "user/keys"
+
+ req, err := s.client.NewRequest("POST", u, key)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ k := new(Key)
+ resp, err := s.client.Do(req, k)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return k, resp, err
+}
+
+// DeleteKey deletes a public key.
+//
+// GitHub API docs: http://developer.github.com/v3/users/keys/#delete-a-public-key
+func (s *UsersService) DeleteKey(id int) (*Response, error) {
+ u := fmt.Sprintf("user/keys/%v", id)
+
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/google/go-querystring/LICENSE b/vendor/github.com/google/go-querystring/LICENSE
new file mode 100644
index 0000000..ae121a1
--- /dev/null
+++ b/vendor/github.com/google/go-querystring/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 Google. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go
new file mode 100644
index 0000000..37080b1
--- /dev/null
+++ b/vendor/github.com/google/go-querystring/query/encode.go
@@ -0,0 +1,320 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package query implements encoding of structs into URL query parameters.
+//
+// As a simple example:
+//
+// type Options struct {
+// Query string `url:"q"`
+// ShowAll bool `url:"all"`
+// Page int `url:"page"`
+// }
+//
+// opt := Options{ "foo", true, 2 }
+// v, _ := query.Values(opt)
+// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2"
+//
+// The exact mapping between Go values and url.Values is described in the
+// documentation for the Values() function.
+package query
+
+import (
+ "bytes"
+ "fmt"
+ "net/url"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var timeType = reflect.TypeOf(time.Time{})
+
+var encoderType = reflect.TypeOf(new(Encoder)).Elem()
+
+// Encoder is an interface implemented by any type that wishes to encode
+// itself into URL values in a non-standard way.
+type Encoder interface {
+ EncodeValues(key string, v *url.Values) error
+}
+
+// Values returns the url.Values encoding of v.
+//
+// Values expects to be passed a struct, and traverses it recursively using the
+// following encoding rules.
+//
+// Each exported struct field is encoded as a URL parameter unless
+//
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option
+//
+// The empty values are false, 0, any nil pointer or interface value, any array
+// slice, map, or string of length zero, and any time.Time that returns true
+// for IsZero().
+//
+// The URL parameter name defaults to the struct field name but can be
+// specified in the struct field's tag value. The "url" key in the struct
+// field's tag value is the key name, followed by an optional comma and
+// options. For example:
+//
+// // Field is ignored by this package.
+// Field int `url:"-"`
+//
+// // Field appears as URL parameter "myName".
+// Field int `url:"myName"`
+//
+// // Field appears as URL parameter "myName" and the field is omitted if
+// // its value is empty
+// Field int `url:"myName,omitempty"`
+//
+// // Field appears as URL parameter "Field" (the default), but the field
+// // is skipped if empty. Note the leading comma.
+// Field int `url:",omitempty"`
+//
+// For encoding individual field values, the following type-dependent rules
+// apply:
+//
+// Boolean values default to encoding as the strings "true" or "false".
+// Including the "int" option signals that the field should be encoded as the
+// strings "1" or "0".
+//
+// time.Time values default to encoding as RFC3339 timestamps. Including the
+// "unix" option signals that the field should be encoded as a Unix time (see
+// time.Unix())
+//
+// Slice and Array values default to encoding as multiple URL values of the
+// same name. Including the "comma" option signals that the field should be
+// encoded as a single comma-delimited value. Including the "space" option
+// similarly encodes the value as a single space-delimited string. Including
+// the "semicolon" option will encode the value as a semicolon-delimited string.
+// Including the "brackets" option signals that the multiple URL values should
+// have "[]" appended to the value name. "numbered" will append a number to
+// the end of each incidence of the value name, example:
+// name0=value0&name1=value1, etc.
+//
+// Anonymous struct fields are usually encoded as if their inner exported
+// fields were fields in the outer struct, subject to the standard Go
+// visibility rules. An anonymous struct field with a name given in its URL
+// tag is treated as having that name, rather than being anonymous.
+//
+// Non-nil pointer values are encoded as the value pointed to.
+//
+// Nested structs are encoded including parent fields in value names for
+// scoping. e.g:
+//
+// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO"
+//
+// All other values are encoded using their default string representation.
+//
+// Multiple fields that encode to the same URL parameter name will be included
+// as multiple URL values of the same name.
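+//
+// As a combined sketch (the type and values below are illustrative only, not
+// part of the upstream documentation), the rules above compose as follows:
+//
+//	type Search struct {
+//		Query string   `url:"q"`
+//		Tags  []string `url:"tag,omitempty"`
+//		Page  int      `url:"page,omitempty"`
+//	}
+//
+//	v, _ := query.Values(Search{Query: "golang", Tags: []string{"a", "b"}})
+//	fmt.Print(v.Encode()) // "q=golang&tag=a&tag=b"; Page is omitted because it is zero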
+func Values(v interface{}) (url.Values, error) {
+ values := make(url.Values)
+ val := reflect.ValueOf(v)
+ for val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ return values, nil
+ }
+ val = val.Elem()
+ }
+
+ if v == nil {
+ return values, nil
+ }
+
+ if val.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind())
+ }
+
+ err := reflectValue(values, val, "")
+ return values, err
+}
+
+// reflectValue populates the values parameter from the struct fields in val.
+// Embedded structs are followed recursively (using the rules defined in the
+// Values function documentation) breadth-first.
+func reflectValue(values url.Values, val reflect.Value, scope string) error {
+ var embedded []reflect.Value
+
+ typ := val.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ sf := typ.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous { // unexported
+ continue
+ }
+
+ sv := val.Field(i)
+ tag := sf.Tag.Get("url")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if name == "" {
+ if sf.Anonymous && sv.Kind() == reflect.Struct {
+ // save embedded struct for later processing
+ embedded = append(embedded, sv)
+ continue
+ }
+
+ name = sf.Name
+ }
+
+ if scope != "" {
+ name = scope + "[" + name + "]"
+ }
+
+ if opts.Contains("omitempty") && isEmptyValue(sv) {
+ continue
+ }
+
+ if sv.Type().Implements(encoderType) {
+ if !reflect.Indirect(sv).IsValid() {
+ sv = reflect.New(sv.Type().Elem())
+ }
+
+ m := sv.Interface().(Encoder)
+ if err := m.EncodeValues(name, &values); err != nil {
+ return err
+ }
+ continue
+ }
+
+ if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array {
+ var del byte
+ if opts.Contains("comma") {
+ del = ','
+ } else if opts.Contains("space") {
+ del = ' '
+ } else if opts.Contains("semicolon") {
+ del = ';'
+ } else if opts.Contains("brackets") {
+ name = name + "[]"
+ }
+
+ if del != 0 {
+ s := new(bytes.Buffer)
+ first := true
+ for i := 0; i < sv.Len(); i++ {
+ if first {
+ first = false
+ } else {
+ s.WriteByte(del)
+ }
+ s.WriteString(valueString(sv.Index(i), opts))
+ }
+ values.Add(name, s.String())
+ } else {
+ for i := 0; i < sv.Len(); i++ {
+ k := name
+ if opts.Contains("numbered") {
+ k = fmt.Sprintf("%s%d", name, i)
+ }
+ values.Add(k, valueString(sv.Index(i), opts))
+ }
+ }
+ continue
+ }
+
+ for sv.Kind() == reflect.Ptr {
+ if sv.IsNil() {
+ break
+ }
+ sv = sv.Elem()
+ }
+
+ if sv.Type() == timeType {
+ values.Add(name, valueString(sv, opts))
+ continue
+ }
+
+ if sv.Kind() == reflect.Struct {
+ reflectValue(values, sv, name)
+ continue
+ }
+
+ values.Add(name, valueString(sv, opts))
+ }
+
+ for _, f := range embedded {
+ if err := reflectValue(values, f, scope); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// valueString returns the string representation of a value.
+func valueString(v reflect.Value, opts tagOptions) string {
+ for v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return ""
+ }
+ v = v.Elem()
+ }
+
+ if v.Kind() == reflect.Bool && opts.Contains("int") {
+ if v.Bool() {
+ return "1"
+ }
+ return "0"
+ }
+
+ if v.Type() == timeType {
+ t := v.Interface().(time.Time)
+ if opts.Contains("unix") {
+ return strconv.FormatInt(t.Unix(), 10)
+ }
+ return t.Format(time.RFC3339)
+ }
+
+ return fmt.Sprint(v.Interface())
+}
+
+// isEmptyValue checks if a value should be considered empty for the purposes
+// of omitting fields with the "omitempty" option.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+
+ if v.Type() == timeType {
+ return v.Interface().(time.Time).IsZero()
+ }
+
+ return false
+}
+
+// tagOptions is the string following a comma in a struct field's "url" tag, or
+// the empty string. It does not include the leading comma.
+type tagOptions []string
+
+// parseTag splits a struct field's url tag into its name and comma-separated
+// options.
+func parseTag(tag string) (string, tagOptions) {
+ s := strings.Split(tag, ",")
+ return s[0], s[1:]
+}
+
+// Contains checks whether the tagOptions contains the specified option.
+func (o tagOptions) Contains(option string) bool {
+ for _, s := range o {
+ if s == option {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/inconshreveable/go-update/LICENSE b/vendor/github.com/inconshreveable/go-update/LICENSE
new file mode 100644
index 0000000..418a5d1
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/inconshreveable/go-update/README.md b/vendor/github.com/inconshreveable/go-update/README.md
new file mode 100644
index 0000000..438ffd4
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/README.md
@@ -0,0 +1,65 @@
+# go-update: Build self-updating Go programs [![godoc reference](https://godoc.org/github.com/inconshreveable/go-update?status.png)](https://godoc.org/github.com/inconshreveable/go-update)
+
+Package update provides functionality to implement secure, self-updating Go programs (or other single-file targets).
+A program can update itself by replacing its executable file with a new version.
+
+It provides the flexibility to implement different updating user experiences
+like auto-updating, or manual user-initiated updates. It also boasts
+advanced features like binary patching and code signing verification.
+
+Example of updating from a URL:
+
+```go
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/inconshreveable/go-update"
+)
+
+func doUpdate(url string) error {
+ resp, err := http.Get(url)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ err = update.Apply(resp.Body, update.Options{})
+ if err != nil {
+ // error handling
+ }
+ return err
+}
+```
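+
+When `Apply` fails, a common follow-up (shown here as a sketch, not taken from
+the upstream documentation) is to check `RollbackError` to find out whether the
+failed update also failed to roll back:
+
+```go
+if err := doUpdate("https://example.com/myapp"); err != nil { // URL is illustrative
+	if rerr := update.RollbackError(err); rerr != nil {
+		// the on-disk binary may now be missing; ask the user to reinstall manually
+		log.Fatalf("failed to roll back from bad update: %v", rerr)
+	}
+	log.Printf("update failed but was rolled back: %v", err)
+}
+```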
+
+## Features
+
+- Cross platform support (Windows too!)
+- Binary patch application
+- Checksum verification
+- Code signing verification
+- Support for updating arbitrary files
+
+## [equinox.io](https://equinox.io)
+[equinox.io](https://equinox.io) is a complete ready-to-go updating solution built on top of go-update that provides:
+
+- Hosted updates
+- Update channels (stable, beta, nightly, ...)
+- Dynamically computed binary diffs
+- Automatic key generation and code
+- Release tooling with proper code signing
+- Update/download metrics
+
+## API Compatibility Promises
+The master branch of `go-update` is *not* guaranteed to have a stable API over time. For any production application, you should vendor
+your dependency on `go-update` with a tool like git submodules, [gb](http://getgb.io/) or [govendor](https://github.com/kardianos/govendor).
+
+The `go-update` package makes the following promises about API compatibility:
+1. A list of all API-breaking changes will be documented in this README.
+1. `go-update` will strive for as few API-breaking changes as possible.
+
+## API Breaking Changes
+- **Sept 3, 2015**: The `Options` struct passed to `Apply` was changed to be passed by value instead of passed by pointer. Old API at `28de026`.
+- **Aug 9, 2015**: 2.0 API. Old API at `221d034` or `gopkg.in/inconshreveable/go-update.v0`.
+
+## License
+Apache
diff --git a/vendor/github.com/inconshreveable/go-update/apply.go b/vendor/github.com/inconshreveable/go-update/apply.go
new file mode 100644
index 0000000..b26dae1
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/apply.go
@@ -0,0 +1,322 @@
+package update
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/inconshreveable/go-update/internal/osext"
+)
+
+var (
+ openFile = os.OpenFile
+)
+
+// Apply performs an update of the current executable (or opts.TargetFile, if set) with the contents of the given io.Reader.
+//
+// Apply performs the following actions to ensure a safe cross-platform update:
+//
+// 1. If configured, applies the contents of the update io.Reader as a binary patch.
+//
+// 2. If configured, computes the checksum of the new executable and verifies it matches.
+//
+// 3. If configured, verifies the signature with a public key.
+//
+// 4. Creates a new file, /path/to/.target.new with the TargetMode with the contents of the updated file
+//
+// 5. Renames /path/to/target to /path/to/.target.old
+//
+// 6. Renames /path/to/.target.new to /path/to/target
+//
+// 7. If the final rename is successful, deletes /path/to/.target.old, returns no error. On Windows,
+// the removal of /path/to/.target.old always fails, so Apply hides the old file instead.
+//
+// 8. If the final rename fails, attempts to roll back by renaming /path/to/.target.old
+// back to /path/to/target.
+//
+// If the roll back operation fails, the file system is left in an inconsistent state (between steps 5 and 6) where
+// there is no new executable file and the old executable file could not be moved to its original location. In this
+// case you should notify the user of the bad news and ask them to recover manually. Applications can determine whether
+// the rollback failed by calling RollbackError; see the documentation on that function for additional detail.
+func Apply(update io.Reader, opts Options) error {
+ // validate
+ verify := false
+ switch {
+ case opts.Signature != nil && opts.PublicKey != nil:
+ // okay
+ verify = true
+ case opts.Signature != nil:
+ return errors.New("no public key to verify signature with")
+ case opts.PublicKey != nil:
+ return errors.New("No signature to verify with")
+ }
+
+ // set defaults
+ if opts.Hash == 0 {
+ opts.Hash = crypto.SHA256
+ }
+ if opts.Verifier == nil {
+ opts.Verifier = NewECDSAVerifier()
+ }
+ if opts.TargetMode == 0 {
+ opts.TargetMode = 0755
+ }
+
+ // get target path
+ var err error
+ opts.TargetPath, err = opts.getPath()
+ if err != nil {
+ return err
+ }
+
+ var newBytes []byte
+ if opts.Patcher != nil {
+ if newBytes, err = opts.applyPatch(update); err != nil {
+ return err
+ }
+ } else {
+ // no patch to apply, go on through
+ if newBytes, err = ioutil.ReadAll(update); err != nil {
+ return err
+ }
+ }
+
+ // verify checksum if requested
+ if opts.Checksum != nil {
+ if err = opts.verifyChecksum(newBytes); err != nil {
+ return err
+ }
+ }
+
+ if verify {
+ if err = opts.verifySignature(newBytes); err != nil {
+ return err
+ }
+ }
+
+ // get the directory the executable exists in
+ updateDir := filepath.Dir(opts.TargetPath)
+ filename := filepath.Base(opts.TargetPath)
+
+ // Copy the contents of the new binary to a new executable file
+ newPath := filepath.Join(updateDir, fmt.Sprintf(".%s.new", filename))
+ fp, err := openFile(newPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, opts.TargetMode)
+ if err != nil {
+ return err
+ }
+ defer fp.Close()
+
+ _, err = io.Copy(fp, bytes.NewReader(newBytes))
+ if err != nil {
+ return err
+ }
+
+ // if we don't call fp.Close(), windows won't let us move the new executable
+ // because the file will still be "in use"
+ fp.Close()
+
+ // this is where we'll move the executable to so that we can swap in the updated replacement
+ oldPath := opts.OldSavePath
+ removeOld := opts.OldSavePath == ""
+ if removeOld {
+ oldPath = filepath.Join(updateDir, fmt.Sprintf(".%s.old", filename))
+ }
+
+ // delete any existing old exec file - this is necessary on Windows for two reasons:
+ // 1. after a successful update, Windows can't remove the .old file because the process is still running
+ // 2. windows rename operations fail if the destination file already exists
+ _ = os.Remove(oldPath)
+
+ // move the existing executable to a new file in the same directory
+ err = os.Rename(opts.TargetPath, oldPath)
+ if err != nil {
+ return err
+ }
+
+ // move the new executable in to become the new program
+ err = os.Rename(newPath, opts.TargetPath)
+
+ if err != nil {
+ // move unsuccessful
+ //
+ // The filesystem is now in a bad state. We have successfully
+ // moved the existing binary to a new location, but we couldn't move the new
+ // binary to take its place. That means there is no file where the current executable binary
+ // used to be!
+ // Try to rollback by restoring the old binary to its original path.
+ rerr := os.Rename(oldPath, opts.TargetPath)
+ if rerr != nil {
+ return &rollbackErr{err, rerr}
+ }
+
+ return err
+ }
+
+ // move successful, remove the old binary if needed
+ if removeOld {
+ errRemove := os.Remove(oldPath)
+
+ // windows has trouble with removing old binaries, so hide it instead
+ if errRemove != nil {
+ _ = hideFile(oldPath)
+ }
+ }
+
+ return nil
+}
+
+// RollbackError takes an error value returned by Apply and returns the error, if any,
+// that occurred when attempting to roll back from a failed update. Applications should
+// always call this function on any non-nil errors returned by Apply.
+//
+// If no rollback was needed or if the rollback was successful, RollbackError returns nil,
+// otherwise it returns the error encountered when trying to roll back.
+func RollbackError(err error) error {
+ if err == nil {
+ return nil
+ }
+ if rerr, ok := err.(*rollbackErr); ok {
+ return rerr.rollbackErr
+ }
+ return nil
+}
+
+type rollbackErr struct {
+ error // original error
+ rollbackErr error // error encountered while rolling back
+}
+
+type Options struct {
+ // TargetPath defines the path to the file to update.
+ // The empty string means 'the executable file of the running program'.
+ TargetPath string
+
+ // Create TargetPath replacement with this file mode. If zero, defaults to 0755.
+ TargetMode os.FileMode
+
+ // Checksum of the new binary to verify against. If nil, no checksum or signature verification is done.
+ Checksum []byte
+
+ // Public key to use for signature verification. If nil, no signature verification is done.
+ PublicKey crypto.PublicKey
+
+ // Signature to verify the updated file. If nil, no signature verification is done.
+ Signature []byte
+
+ // Pluggable signature verification algorithm. If nil, ECDSA is used.
+ Verifier Verifier
+
+ // Use this hash function to generate the checksum. If not set, SHA256 is used.
+ Hash crypto.Hash
+
+ // If nil, treat the update as a complete replacement for the contents of the file at TargetPath.
+ // If non-nil, treat the update contents as a patch and use this object to apply the patch.
+ Patcher Patcher
+
+ // Store the old executable file at this path after a successful update.
+ // The empty string means the old executable file will be removed after the update.
+ OldSavePath string
+}
+
+// CheckPermissions determines whether the process has the correct permissions to
+// perform the requested update. If the update can proceed, it returns nil, otherwise
+// it returns the error that would occur if an update were attempted.
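+//
+// A pre-flight sketch (assumed usage, run before downloading an update):
+//
+//	opts := update.Options{}
+//	if err := opts.CheckPermissions(); err != nil {
+//		// e.g. ask the user to re-run the updater with elevated privileges
+//		return err
+//	}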
+func (o *Options) CheckPermissions() error {
+ // get the directory the file exists in
+ path, err := o.getPath()
+ if err != nil {
+ return err
+ }
+
+ fileDir := filepath.Dir(path)
+ fileName := filepath.Base(path)
+
+ // attempt to open a file in the file's directory
+ newPath := filepath.Join(fileDir, fmt.Sprintf(".%s.new", fileName))
+ fp, err := openFile(newPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, o.TargetMode)
+ if err != nil {
+ return err
+ }
+ fp.Close()
+
+ _ = os.Remove(newPath)
+ return nil
+}
+
+// SetPublicKeyPEM is a convenience method to set the PublicKey property
+// used for checking a completed update's signature by parsing a
+// Public Key formatted as PEM data.
+func (o *Options) SetPublicKeyPEM(pembytes []byte) error {
+ block, _ := pem.Decode(pembytes)
+ if block == nil {
+ return errors.New("couldn't parse PEM data")
+ }
+
+ pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+ o.PublicKey = pub
+ return nil
+}
+
+func (o *Options) getPath() (string, error) {
+ if o.TargetPath == "" {
+ return osext.Executable()
+ } else {
+ return o.TargetPath, nil
+ }
+}
+
+func (o *Options) applyPatch(patch io.Reader) ([]byte, error) {
+ // open the file to patch
+ old, err := os.Open(o.TargetPath)
+ if err != nil {
+ return nil, err
+ }
+ defer old.Close()
+
+ // apply the patch
+ var applied bytes.Buffer
+ if err = o.Patcher.Patch(old, &applied, patch); err != nil {
+ return nil, err
+ }
+
+ return applied.Bytes(), nil
+}
+
+func (o *Options) verifyChecksum(updated []byte) error {
+ checksum, err := checksumFor(o.Hash, updated)
+ if err != nil {
+ return err
+ }
+
+ if !bytes.Equal(o.Checksum, checksum) {
+ return fmt.Errorf("Updated file has wrong checksum. Expected: %x, got: %x", o.Checksum, checksum)
+ }
+ return nil
+}
+
+func (o *Options) verifySignature(updated []byte) error {
+ checksum, err := checksumFor(o.Hash, updated)
+ if err != nil {
+ return err
+ }
+ return o.Verifier.VerifySignature(checksum, o.Signature, o.Hash, o.PublicKey)
+}
+
+func checksumFor(h crypto.Hash, payload []byte) ([]byte, error) {
+ if !h.Available() {
+ return nil, errors.New("requested hash function not available")
+ }
+ hash := h.New()
+ hash.Write(payload) // guaranteed not to error
+ return hash.Sum([]byte{}), nil
+}
diff --git a/vendor/github.com/inconshreveable/go-update/doc.go b/vendor/github.com/inconshreveable/go-update/doc.go
new file mode 100644
index 0000000..468411f
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/doc.go
@@ -0,0 +1,172 @@
+/*
+Package update provides functionality to implement secure, self-updating Go programs (or other single-file targets).
+
+For complete updating solutions please see Equinox (https://equinox.io) and go-tuf (https://github.com/flynn/go-tuf).
+
+Basic Example
+
+This example shows how to update a program remotely from a URL.
+
+ import (
+ "fmt"
+ "net/http"
+
+ "github.com/inconshreveable/go-update"
+ )
+
+ func doUpdate(url string) error {
+ // request the new file
+ resp, err := http.Get(url)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ err = update.Apply(resp.Body, update.Options{})
+ if err != nil {
+ if rerr := update.RollbackError(err); rerr != nil {
+ fmt.Println("Failed to rollback from bad update: %v", rerr)
+ }
+ }
+ return err
+ }
+
+
+Binary Patching
+
+Go binaries can often be large. It can be advantageous to only ship a binary patch to a client
+instead of the complete program text of a new version.
+
+This example shows how to update a program with a bsdiff binary patch. Other patch formats
+may be applied by implementing the Patcher interface.
+
+ import (
+ "encoding/hex"
+ "io"
+
+ "github.com/inconshreveable/go-update"
+ )
+
+ func updateWithPatch(patch io.Reader) error {
+ err := update.Apply(patch, update.Options{
+ Patcher: update.NewBSDiffPatcher()
+ })
+ if err != nil {
+ // error handling
+ }
+ return err
+ }
+
+Checksum Verification
+
+Updating executable code on a computer can be a dangerous operation unless you
+take the appropriate steps to guarantee the authenticity of the new code. While
+checksum verification is important, it should always be combined with signature
+verification (next section) to guarantee that the code came from a trusted party.
+
+go-update validates SHA256 checksums by default, but this is pluggable via the Hash
+property on the Options struct.
+
+This example shows how to guarantee that the newly-updated binary is verified to
+have an appropriate checksum (that was otherwise retrieved via a secure channel)
+specified as a hex string.
+
+ import (
+ "crypto"
+ _ "crypto/sha256"
+ "encoding/hex"
+ "io"
+
+ "github.com/inconshreveable/go-update"
+ )
+
+ func updateWithChecksum(binary io.Reader, hexChecksum string) error {
+ checksum, err := hex.DecodeString(hexChecksum)
+ if err != nil {
+ return err
+ }
+ err = update.Apply(binary, update.Options{
+ Hash: crypto.SHA256, // this is the default, you don't need to specify it
+ Checksum: checksum,
+ })
+ if err != nil {
+ // error handling
+ }
+ return err
+ }
+
+Cryptographic Signature Verification
+
+Cryptographic verification of new code from an update is an extremely important way to guarantee the
+security and integrity of your updates.
+
+Verification is performed by validating the signature of a hash of the new file. This
+means nothing changes if you apply your update with a patch.
+
+This example shows how to add signature verification to your updates. To make all of this work
+an application distributor must first create a public/private key pair and embed the public key
+into their application. When they issue a new release, the issuer must sign the new executable file
+with the private key and distribute the signature along with the update.
+
+ import (
+ "crypto"
+ _ "crypto/sha256"
+ "encoding/hex"
+ "io"
+
+ "github.com/inconshreveable/go-update"
+ )
+
+ var publicKey = []byte(`
+ -----BEGIN PUBLIC KEY-----
+ MFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEtrVmBxQvheRArXjg2vG1xIprWGuCyESx
+ MMY8pjmjepSy2kuz+nl9aFLqmr+rDNdYvEBqQaZrYMc6k29gjvoQnQ==
+ -----END PUBLIC KEY-----
+ `)
+
+ func verifiedUpdate(binary io.Reader, hexChecksum, hexSignature string) error {
+ checksum, err := hex.DecodeString(hexChecksum)
+ if err != nil {
+ return err
+ }
+ signature, err := hex.DecodeString(hexSignature)
+ if err != nil {
+ return err
+ }
+ opts := update.Options{
+ Checksum: checksum,
+ Signature: signature,
+ Hash: crypto.SHA256, // this is the default, you don't need to specify it
+ Verifier: update.NewECDSAVerifier(), // this is the default, you don't need to specify it
+ }
+ err = opts.SetPublicKeyPEM(publicKey)
+ if err != nil {
+ return err
+ }
+ err = update.Apply(binary, opts)
+ if err != nil {
+ // error handling
+ }
+ return err
+ }
+
+
+Building Single-File Go Binaries
+
+In order to update a Go application with go-update, you must distribute it as a single executable.
+This is often easy, but some applications require static assets (like HTML and CSS asset files or TLS certificates).
+In order to update applications like these, you'll want to make sure to embed those asset files into
+the distributed binary with a tool like go-bindata (my favorite): https://github.com/jteeuwen/go-bindata
+
+Non-Goals
+
+Mechanisms and protocols for determining whether an update should be applied and, if so, which one are
+out of scope for this package. Please consult go-tuf (https://github.com/flynn/go-tuf) or Equinox (https://equinox.io)
+for more complete solutions.
+
+go-update only works for self-updating applications that are distributed as a single binary, i.e.
+applications that do not have additional assets or dependency files.
+Updating applications that are distributed as multiple on-disk files is out of scope, although this
+may change in future versions of this library.
+
+*/
+package update
diff --git a/vendor/github.com/inconshreveable/go-update/hide_noop.go b/vendor/github.com/inconshreveable/go-update/hide_noop.go
new file mode 100644
index 0000000..3707756
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/hide_noop.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package update
+
+func hideFile(path string) error {
+ return nil
+}
diff --git a/vendor/github.com/inconshreveable/go-update/hide_windows.go b/vendor/github.com/inconshreveable/go-update/hide_windows.go
new file mode 100644
index 0000000..c368b9c
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/hide_windows.go
@@ -0,0 +1,19 @@
+package update
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func hideFile(path string) error {
+ kernel32 := syscall.NewLazyDLL("kernel32.dll")
+ setFileAttributes := kernel32.NewProc("SetFileAttributesW")
+
+ r1, _, err := setFileAttributes.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))), 2)
+
+ if r1 == 0 {
+ return err
+ } else {
+ return nil
+ }
+}
diff --git a/vendor/github.com/inconshreveable/go-update/internal/binarydist/License b/vendor/github.com/inconshreveable/go-update/internal/binarydist/License
new file mode 100644
index 0000000..183c389
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/binarydist/License
@@ -0,0 +1,22 @@
+Copyright 2012 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/inconshreveable/go-update/internal/binarydist/Readme.md b/vendor/github.com/inconshreveable/go-update/internal/binarydist/Readme.md
new file mode 100644
index 0000000..dadc368
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/binarydist/Readme.md
@@ -0,0 +1,7 @@
+# binarydist
+
+Package binarydist implements binary diff and patch as described on
+http://www.daemonology.net/bsdiff/. It reads and writes files
+compatible with the tools there.
+
+Documentation at .
diff --git a/vendor/github.com/inconshreveable/go-update/internal/binarydist/bzip2.go b/vendor/github.com/inconshreveable/go-update/internal/binarydist/bzip2.go
new file mode 100644
index 0000000..a2516b8
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/binarydist/bzip2.go
@@ -0,0 +1,40 @@
+package binarydist
+
+import (
+ "io"
+ "os/exec"
+)
+
+type bzip2Writer struct {
+ c *exec.Cmd
+ w io.WriteCloser
+}
+
+func (w bzip2Writer) Write(b []byte) (int, error) {
+ return w.w.Write(b)
+}
+
+func (w bzip2Writer) Close() error {
+ if err := w.w.Close(); err != nil {
+ return err
+ }
+ return w.c.Wait()
+}
+
+// Package compress/bzip2 implements only decompression,
+// so we'll fake it by running bzip2 in another process.
+func newBzip2Writer(w io.Writer) (wc io.WriteCloser, err error) {
+ var bw bzip2Writer
+ bw.c = exec.Command("bzip2", "-c")
+ bw.c.Stdout = w
+
+ if bw.w, err = bw.c.StdinPipe(); err != nil {
+ return nil, err
+ }
+
+ if err = bw.c.Start(); err != nil {
+ return nil, err
+ }
+
+ return bw, nil
+}
diff --git a/vendor/github.com/inconshreveable/go-update/internal/binarydist/diff.go b/vendor/github.com/inconshreveable/go-update/internal/binarydist/diff.go
new file mode 100644
index 0000000..1d2d951
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/binarydist/diff.go
@@ -0,0 +1,408 @@
+package binarydist
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "io/ioutil"
+)
+
+func swap(a []int, i, j int) { a[i], a[j] = a[j], a[i] }
+
+func split(I, V []int, start, length, h int) {
+ var i, j, k, x, jj, kk int
+
+ if length < 16 {
+ for k = start; k < start+length; k += j {
+ j = 1
+ x = V[I[k]+h]
+ for i = 1; k+i < start+length; i++ {
+ if V[I[k+i]+h] < x {
+ x = V[I[k+i]+h]
+ j = 0
+ }
+ if V[I[k+i]+h] == x {
+ swap(I, k+i, k+j)
+ j++
+ }
+ }
+ for i = 0; i < j; i++ {
+ V[I[k+i]] = k + j - 1
+ }
+ if j == 1 {
+ I[k] = -1
+ }
+ }
+ return
+ }
+
+ x = V[I[start+length/2]+h]
+ jj = 0
+ kk = 0
+ for i = start; i < start+length; i++ {
+ if V[I[i]+h] < x {
+ jj++
+ }
+ if V[I[i]+h] == x {
+ kk++
+ }
+ }
+ jj += start
+ kk += jj
+
+ i = start
+ j = 0
+ k = 0
+ for i < jj {
+ if V[I[i]+h] < x {
+ i++
+ } else if V[I[i]+h] == x {
+ swap(I, i, jj+j)
+ j++
+ } else {
+ swap(I, i, kk+k)
+ k++
+ }
+ }
+
+ for jj+j < kk {
+ if V[I[jj+j]+h] == x {
+ j++
+ } else {
+ swap(I, jj+j, kk+k)
+ k++
+ }
+ }
+
+ if jj > start {
+ split(I, V, start, jj-start, h)
+ }
+
+ for i = 0; i < kk-jj; i++ {
+ V[I[jj+i]] = kk - 1
+ }
+ if jj == kk-1 {
+ I[jj] = -1
+ }
+
+ if start+length > kk {
+ split(I, V, kk, start+length-kk, h)
+ }
+}
+
+func qsufsort(obuf []byte) []int {
+ var buckets [256]int
+ var i, h int
+ I := make([]int, len(obuf)+1)
+ V := make([]int, len(obuf)+1)
+
+ for _, c := range obuf {
+ buckets[c]++
+ }
+ for i = 1; i < 256; i++ {
+ buckets[i] += buckets[i-1]
+ }
+ copy(buckets[1:], buckets[:])
+ buckets[0] = 0
+
+ for i, c := range obuf {
+ buckets[c]++
+ I[buckets[c]] = i
+ }
+
+ I[0] = len(obuf)
+ for i, c := range obuf {
+ V[i] = buckets[c]
+ }
+
+ V[len(obuf)] = 0
+ for i = 1; i < 256; i++ {
+ if buckets[i] == buckets[i-1]+1 {
+ I[buckets[i]] = -1
+ }
+ }
+ I[0] = -1
+
+ for h = 1; I[0] != -(len(obuf) + 1); h += h {
+ var n int
+ for i = 0; i < len(obuf)+1; {
+ if I[i] < 0 {
+ n -= I[i]
+ i -= I[i]
+ } else {
+ if n != 0 {
+ I[i-n] = -n
+ }
+ n = V[I[i]] + 1 - i
+ split(I, V, i, n, h)
+ i += n
+ n = 0
+ }
+ }
+ if n != 0 {
+ I[i-n] = -n
+ }
+ }
+
+ for i = 0; i < len(obuf)+1; i++ {
+ I[V[i]] = i
+ }
+ return I
+}
+
+func matchlen(a, b []byte) (i int) {
+ for i < len(a) && i < len(b) && a[i] == b[i] {
+ i++
+ }
+ return i
+}
+
+func search(I []int, obuf, nbuf []byte, st, en int) (pos, n int) {
+ if en-st < 2 {
+ x := matchlen(obuf[I[st]:], nbuf)
+ y := matchlen(obuf[I[en]:], nbuf)
+
+ if x > y {
+ return I[st], x
+ } else {
+ return I[en], y
+ }
+ }
+
+ x := st + (en-st)/2
+ if bytes.Compare(obuf[I[x]:], nbuf) < 0 {
+ return search(I, obuf, nbuf, x, en)
+ } else {
+ return search(I, obuf, nbuf, st, x)
+ }
+ panic("unreached")
+}
+
+// Diff computes the difference between old and new, according to the bsdiff
+// algorithm, and writes the result to patch.
+func Diff(old, new io.Reader, patch io.Writer) error {
+ obuf, err := ioutil.ReadAll(old)
+ if err != nil {
+ return err
+ }
+
+ nbuf, err := ioutil.ReadAll(new)
+ if err != nil {
+ return err
+ }
+
+ pbuf, err := diffBytes(obuf, nbuf)
+ if err != nil {
+ return err
+ }
+
+ _, err = patch.Write(pbuf)
+ return err
+}
+
+func diffBytes(obuf, nbuf []byte) ([]byte, error) {
+ var patch seekBuffer
+ err := diff(obuf, nbuf, &patch)
+ if err != nil {
+ return nil, err
+ }
+ return patch.buf, nil
+}
+
+func diff(obuf, nbuf []byte, patch io.WriteSeeker) error {
+ var lenf int
+ I := qsufsort(obuf)
+ db := make([]byte, len(nbuf))
+ eb := make([]byte, len(nbuf))
+ var dblen, eblen int
+
+ var hdr header
+ hdr.Magic = magic
+ hdr.NewSize = int64(len(nbuf))
+ err := binary.Write(patch, signMagLittleEndian{}, &hdr)
+ if err != nil {
+ return err
+ }
+
+ // Compute the differences, writing ctrl as we go
+ pfbz2, err := newBzip2Writer(patch)
+ if err != nil {
+ return err
+ }
+ var scan, pos, length int
+ var lastscan, lastpos, lastoffset int
+ for scan < len(nbuf) {
+ var oldscore int
+ scan += length
+ for scsc := scan; scan < len(nbuf); scan++ {
+ pos, length = search(I, obuf, nbuf[scan:], 0, len(obuf))
+
+ for ; scsc < scan+length; scsc++ {
+ if scsc+lastoffset < len(obuf) &&
+ obuf[scsc+lastoffset] == nbuf[scsc] {
+ oldscore++
+ }
+ }
+
+ if (length == oldscore && length != 0) || length > oldscore+8 {
+ break
+ }
+
+ if scan+lastoffset < len(obuf) && obuf[scan+lastoffset] == nbuf[scan] {
+ oldscore--
+ }
+ }
+
+ if length != oldscore || scan == len(nbuf) {
+ var s, Sf int
+ lenf = 0
+ for i := 0; lastscan+i < scan && lastpos+i < len(obuf); {
+ if obuf[lastpos+i] == nbuf[lastscan+i] {
+ s++
+ }
+ i++
+ if s*2-i > Sf*2-lenf {
+ Sf = s
+ lenf = i
+ }
+ }
+
+ lenb := 0
+ if scan < len(nbuf) {
+ var s, Sb int
+ for i := 1; (scan >= lastscan+i) && (pos >= i); i++ {
+ if obuf[pos-i] == nbuf[scan-i] {
+ s++
+ }
+ if s*2-i > Sb*2-lenb {
+ Sb = s
+ lenb = i
+ }
+ }
+ }
+
+ if lastscan+lenf > scan-lenb {
+ overlap := (lastscan + lenf) - (scan - lenb)
+ s := 0
+ Ss := 0
+ lens := 0
+ for i := 0; i < overlap; i++ {
+ if nbuf[lastscan+lenf-overlap+i] == obuf[lastpos+lenf-overlap+i] {
+ s++
+ }
+ if nbuf[scan-lenb+i] == obuf[pos-lenb+i] {
+ s--
+ }
+ if s > Ss {
+ Ss = s
+ lens = i + 1
+ }
+ }
+
+ lenf += lens - overlap
+ lenb -= lens
+ }
+
+ for i := 0; i < lenf; i++ {
+ db[dblen+i] = nbuf[lastscan+i] - obuf[lastpos+i]
+ }
+ for i := 0; i < (scan-lenb)-(lastscan+lenf); i++ {
+ eb[eblen+i] = nbuf[lastscan+lenf+i]
+ }
+
+ dblen += lenf
+ eblen += (scan - lenb) - (lastscan + lenf)
+
+ err = binary.Write(pfbz2, signMagLittleEndian{}, int64(lenf))
+ if err != nil {
+ pfbz2.Close()
+ return err
+ }
+
+ val := (scan - lenb) - (lastscan + lenf)
+ err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val))
+ if err != nil {
+ pfbz2.Close()
+ return err
+ }
+
+ val = (pos - lenb) - (lastpos + lenf)
+ err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val))
+ if err != nil {
+ pfbz2.Close()
+ return err
+ }
+
+ lastscan = scan - lenb
+ lastpos = pos - lenb
+ lastoffset = pos - scan
+ }
+ }
+ err = pfbz2.Close()
+ if err != nil {
+ return err
+ }
+
+ // Compute size of compressed ctrl data
+ l64, err := patch.Seek(0, 1)
+ if err != nil {
+ return err
+ }
+ hdr.CtrlLen = int64(l64 - 32)
+
+ // Write compressed diff data
+ pfbz2, err = newBzip2Writer(patch)
+ if err != nil {
+ return err
+ }
+ n, err := pfbz2.Write(db[:dblen])
+ if err != nil {
+ pfbz2.Close()
+ return err
+ }
+ if n != dblen {
+ pfbz2.Close()
+ return io.ErrShortWrite
+ }
+ err = pfbz2.Close()
+ if err != nil {
+ return err
+ }
+
+ // Compute size of compressed diff data
+ n64, err := patch.Seek(0, 1)
+ if err != nil {
+ return err
+ }
+ hdr.DiffLen = n64 - l64
+
+ // Write compressed extra data
+ pfbz2, err = newBzip2Writer(patch)
+ if err != nil {
+ return err
+ }
+ n, err = pfbz2.Write(eb[:eblen])
+ if err != nil {
+ pfbz2.Close()
+ return err
+ }
+ if n != eblen {
+ pfbz2.Close()
+ return io.ErrShortWrite
+ }
+ err = pfbz2.Close()
+ if err != nil {
+ return err
+ }
+
+ // Seek to the beginning, write the header, and close the file
+ _, err = patch.Seek(0, 0)
+ if err != nil {
+ return err
+ }
+ err = binary.Write(patch, signMagLittleEndian{}, &hdr)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/inconshreveable/go-update/internal/binarydist/doc.go b/vendor/github.com/inconshreveable/go-update/internal/binarydist/doc.go
new file mode 100644
index 0000000..3c92d87
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/binarydist/doc.go
@@ -0,0 +1,24 @@
+// Package binarydist implements binary diff and patch as described on
+// http://www.daemonology.net/bsdiff/. It reads and writes files
+// compatible with the tools there.
+package binarydist
+
+var magic = [8]byte{'B', 'S', 'D', 'I', 'F', 'F', '4', '0'}
+
+// File format:
+// 0 8 "BSDIFF40"
+// 8 8 X
+// 16 8 Y
+// 24 8 sizeof(newfile)
+// 32 X bzip2(control block)
+// 32+X Y bzip2(diff block)
+// 32+X+Y ??? bzip2(extra block)
+// with control block a set of triples (x,y,z) meaning "add x bytes
+// from oldfile to x bytes from the diff block; copy y bytes from the
+// extra block; seek forwards in oldfile by z bytes".
+type header struct {
+ Magic [8]byte
+ CtrlLen int64
+ DiffLen int64
+ NewSize int64
+}
diff --git a/vendor/github.com/inconshreveable/go-update/internal/binarydist/encoding.go b/vendor/github.com/inconshreveable/go-update/internal/binarydist/encoding.go
new file mode 100644
index 0000000..75ba585
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/binarydist/encoding.go
@@ -0,0 +1,53 @@
+package binarydist
+
+// signMagLittleEndian is the numeric encoding used by the bsdiff tools.
+// It implements binary.ByteOrder using a sign-magnitude format
+// and little-endian byte order. Only the Uint64, PutUint64 and String
+// methods have been implemented; the rest panic.
+type signMagLittleEndian struct{}
+
+func (signMagLittleEndian) Uint16(b []byte) uint16 { panic("unimplemented") }
+
+func (signMagLittleEndian) PutUint16(b []byte, v uint16) { panic("unimplemented") }
+
+func (signMagLittleEndian) Uint32(b []byte) uint32 { panic("unimplemented") }
+
+func (signMagLittleEndian) PutUint32(b []byte, v uint32) { panic("unimplemented") }
+
+func (signMagLittleEndian) Uint64(b []byte) uint64 {
+ y := int64(b[0]) |
+ int64(b[1])<<8 |
+ int64(b[2])<<16 |
+ int64(b[3])<<24 |
+ int64(b[4])<<32 |
+ int64(b[5])<<40 |
+ int64(b[6])<<48 |
+ int64(b[7]&0x7f)<<56
+
+ if b[7]&0x80 != 0 {
+ y = -y
+ }
+ return uint64(y)
+}
+
+func (signMagLittleEndian) PutUint64(b []byte, v uint64) {
+ x := int64(v)
+ neg := x < 0
+ if neg {
+ x = -x
+ }
+
+ b[0] = byte(x)
+ b[1] = byte(x >> 8)
+ b[2] = byte(x >> 16)
+ b[3] = byte(x >> 24)
+ b[4] = byte(x >> 32)
+ b[5] = byte(x >> 40)
+ b[6] = byte(x >> 48)
+ b[7] = byte(x >> 56)
+ if neg {
+ b[7] |= 0x80
+ }
+}
+
+func (signMagLittleEndian) String() string { return "signMagLittleEndian" }
diff --git a/vendor/github.com/inconshreveable/go-update/internal/binarydist/patch.go b/vendor/github.com/inconshreveable/go-update/internal/binarydist/patch.go
new file mode 100644
index 0000000..eb03225
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/binarydist/patch.go
@@ -0,0 +1,109 @@
+package binarydist
+
+import (
+ "bytes"
+ "compress/bzip2"
+ "encoding/binary"
+ "errors"
+ "io"
+ "io/ioutil"
+)
+
+var ErrCorrupt = errors.New("corrupt patch")
+
+// Patch applies patch to old, according to the bspatch algorithm,
+// and writes the result to new.
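+//
+// A minimal round-trip sketch (the readers and writers are assumed to be set
+// up by the caller, and a written patch must be re-read from its start before
+// it is applied):
+//
+//	_ = binarydist.Diff(oldFile, newFile, patchOut) // produce a bsdiff patch
+//	_ = binarydist.Patch(oldFile, rebuilt, patchIn) // apply it to reconstruct newFile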
+func Patch(old io.Reader, new io.Writer, patch io.Reader) error {
+ var hdr header
+ err := binary.Read(patch, signMagLittleEndian{}, &hdr)
+ if err != nil {
+ return err
+ }
+ if hdr.Magic != magic {
+ return ErrCorrupt
+ }
+ if hdr.CtrlLen < 0 || hdr.DiffLen < 0 || hdr.NewSize < 0 {
+ return ErrCorrupt
+ }
+
+ ctrlbuf := make([]byte, hdr.CtrlLen)
+ _, err = io.ReadFull(patch, ctrlbuf)
+ if err != nil {
+ return err
+ }
+ cpfbz2 := bzip2.NewReader(bytes.NewReader(ctrlbuf))
+
+ diffbuf := make([]byte, hdr.DiffLen)
+ _, err = io.ReadFull(patch, diffbuf)
+ if err != nil {
+ return err
+ }
+ dpfbz2 := bzip2.NewReader(bytes.NewReader(diffbuf))
+
+ // The entire rest of the file is the extra block.
+ epfbz2 := bzip2.NewReader(patch)
+
+ obuf, err := ioutil.ReadAll(old)
+ if err != nil {
+ return err
+ }
+
+ nbuf := make([]byte, hdr.NewSize)
+
+ var oldpos, newpos int64
+ for newpos < hdr.NewSize {
+ var ctrl struct{ Add, Copy, Seek int64 }
+ err = binary.Read(cpfbz2, signMagLittleEndian{}, &ctrl)
+ if err != nil {
+ return err
+ }
+
+ // Sanity-check
+ if newpos+ctrl.Add > hdr.NewSize {
+ return ErrCorrupt
+ }
+
+ // Read diff string
+ _, err = io.ReadFull(dpfbz2, nbuf[newpos:newpos+ctrl.Add])
+ if err != nil {
+ return ErrCorrupt
+ }
+
+ // Add old data to diff string
+ for i := int64(0); i < ctrl.Add; i++ {
+ if oldpos+i >= 0 && oldpos+i < int64(len(obuf)) {
+ nbuf[newpos+i] += obuf[oldpos+i]
+ }
+ }
+
+ // Adjust pointers
+ newpos += ctrl.Add
+ oldpos += ctrl.Add
+
+ // Sanity-check
+ if newpos+ctrl.Copy > hdr.NewSize {
+ return ErrCorrupt
+ }
+
+ // Read extra string
+ _, err = io.ReadFull(epfbz2, nbuf[newpos:newpos+ctrl.Copy])
+ if err != nil {
+ return ErrCorrupt
+ }
+
+ // Adjust pointers
+ newpos += ctrl.Copy
+ oldpos += ctrl.Seek
+ }
+
+ // Write the new file
+ for len(nbuf) > 0 {
+ n, err := new.Write(nbuf)
+ if err != nil {
+ return err
+ }
+ nbuf = nbuf[n:]
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/inconshreveable/go-update/internal/binarydist/seek.go b/vendor/github.com/inconshreveable/go-update/internal/binarydist/seek.go
new file mode 100644
index 0000000..96c0346
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/binarydist/seek.go
@@ -0,0 +1,43 @@
+package binarydist
+
+import (
+ "errors"
+)
+
+type seekBuffer struct {
+ buf []byte
+ pos int
+}
+
+func (b *seekBuffer) Write(p []byte) (n int, err error) {
+ n = copy(b.buf[b.pos:], p)
+ if n == len(p) {
+ b.pos += n
+ return n, nil
+ }
+ b.buf = append(b.buf, p[n:]...)
+ b.pos += len(p)
+ return len(p), nil
+}
+
+func (b *seekBuffer) Seek(offset int64, whence int) (ret int64, err error) {
+ var abs int64
+ switch whence {
+ case 0:
+ abs = offset
+ case 1:
+ abs = int64(b.pos) + offset
+ case 2:
+ abs = int64(len(b.buf)) + offset
+ default:
+ return 0, errors.New("binarydist: invalid whence")
+ }
+ if abs < 0 {
+ return 0, errors.New("binarydist: negative position")
+ }
+ if abs >= 1<<31 {
+ return 0, errors.New("binarydist: position out of range")
+ }
+ b.pos = int(abs)
+ return abs, nil
+}
diff --git a/vendor/github.com/inconshreveable/go-update/internal/osext/LICENSE b/vendor/github.com/inconshreveable/go-update/internal/osext/LICENSE
new file mode 100644
index 0000000..7448756
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/osext/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/inconshreveable/go-update/internal/osext/README.md b/vendor/github.com/inconshreveable/go-update/internal/osext/README.md
new file mode 100644
index 0000000..61350ba
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/osext/README.md
@@ -0,0 +1,16 @@
+### Extensions to the "os" package.
+
+## Find the current Executable and ExecutableFolder.
+
+There is sometimes utility in finding the current executable file
+that is running. This can be used for upgrading the current executable
+or finding resources located relative to the executable file. Both
+working directory and the os.Args[0] value are arbitrary and cannot
+be relied on; os.Args[0] can be "faked".
+
+Multi-platform and supports:
+ * Linux
+ * OS X
+ * Windows
+ * Plan 9
+ * BSDs.
diff --git a/vendor/github.com/inconshreveable/go-update/internal/osext/osext.go b/vendor/github.com/inconshreveable/go-update/internal/osext/osext.go
new file mode 100644
index 0000000..7bef46f
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/osext/osext.go
@@ -0,0 +1,27 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Extensions to the standard "os" package.
+package osext
+
+import "path/filepath"
+
+// Executable returns an absolute path that can be used to
+// re-invoke the current program.
+// It may not be valid after the current program exits.
+func Executable() (string, error) {
+ p, err := executable()
+ return filepath.Clean(p), err
+}
+
+// ExecutableFolder returns the same path as Executable, but with just the folder
+// path. It excludes the executable name and any trailing slash.
+func ExecutableFolder() (string, error) {
+ p, err := Executable()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Dir(p), nil
+}
diff --git a/vendor/github.com/inconshreveable/go-update/internal/osext/osext_plan9.go b/vendor/github.com/inconshreveable/go-update/internal/osext/osext_plan9.go
new file mode 100644
index 0000000..655750c
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/osext/osext_plan9.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package osext
+
+import (
+ "os"
+ "strconv"
+ "syscall"
+)
+
+func executable() (string, error) {
+ f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ return syscall.Fd2path(int(f.Fd()))
+}
diff --git a/vendor/github.com/inconshreveable/go-update/internal/osext/osext_procfs.go b/vendor/github.com/inconshreveable/go-update/internal/osext/osext_procfs.go
new file mode 100644
index 0000000..b2598bc
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/osext/osext_procfs.go
@@ -0,0 +1,36 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux netbsd openbsd solaris dragonfly
+
+package osext
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+)
+
+func executable() (string, error) {
+ switch runtime.GOOS {
+ case "linux":
+ const deletedTag = " (deleted)"
+ execpath, err := os.Readlink("/proc/self/exe")
+ if err != nil {
+ return execpath, err
+ }
+ execpath = strings.TrimSuffix(execpath, deletedTag)
+ execpath = strings.TrimPrefix(execpath, deletedTag)
+ return execpath, nil
+ case "netbsd":
+ return os.Readlink("/proc/curproc/exe")
+ case "openbsd", "dragonfly":
+ return os.Readlink("/proc/curproc/file")
+ case "solaris":
+ return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))
+ }
+ return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
+}
diff --git a/vendor/github.com/inconshreveable/go-update/internal/osext/osext_sysctl.go b/vendor/github.com/inconshreveable/go-update/internal/osext/osext_sysctl.go
new file mode 100644
index 0000000..b66cac8
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/osext/osext_sysctl.go
@@ -0,0 +1,79 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd
+
+package osext
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
+var initCwd, initCwdErr = os.Getwd()
+
+func executable() (string, error) {
+ var mib [4]int32
+ switch runtime.GOOS {
+ case "freebsd":
+ mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
+ case "darwin":
+ mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
+ }
+
+ n := uintptr(0)
+ // Get length.
+ _, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
+ if errNum != 0 {
+ return "", errNum
+ }
+ if n == 0 { // This shouldn't happen.
+ return "", nil
+ }
+ buf := make([]byte, n)
+ _, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
+ if errNum != 0 {
+ return "", errNum
+ }
+ if n == 0 { // This shouldn't happen.
+ return "", nil
+ }
+ for i, v := range buf {
+ if v == 0 {
+ buf = buf[:i]
+ break
+ }
+ }
+ var err error
+ execPath := string(buf)
+ // execPath will not be empty due to above checks.
+ // Try to get the absolute path if the execPath is not rooted.
+ if execPath[0] != '/' {
+ execPath, err = getAbs(execPath)
+ if err != nil {
+ return execPath, err
+ }
+ }
+ // For darwin KERN_PROCARGS may return the path to a symlink rather than the
+ // actual executable.
+ if runtime.GOOS == "darwin" {
+ if execPath, err = filepath.EvalSymlinks(execPath); err != nil {
+ return execPath, err
+ }
+ }
+ return execPath, nil
+}
+
+func getAbs(execPath string) (string, error) {
+ if initCwdErr != nil {
+ return execPath, initCwdErr
+ }
+ // The execPath may begin with a "../" or a "./" so clean it first.
+ // Join the two paths, trailing and starting slashes undetermined, so use
+ // the generic Join function.
+ return filepath.Join(initCwd, filepath.Clean(execPath)), nil
+}
diff --git a/vendor/github.com/inconshreveable/go-update/internal/osext/osext_windows.go b/vendor/github.com/inconshreveable/go-update/internal/osext/osext_windows.go
new file mode 100644
index 0000000..72d282c
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/internal/osext/osext_windows.go
@@ -0,0 +1,34 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package osext
+
+import (
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+var (
+ kernel = syscall.MustLoadDLL("kernel32.dll")
+ getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
+)
+
+// GetModuleFileName() with hModule = NULL
+func executable() (exePath string, err error) {
+ return getModuleFileName()
+}
+
+func getModuleFileName() (string, error) {
+ var n uint32
+ b := make([]uint16, syscall.MAX_PATH)
+ size := uint32(len(b))
+
+ r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
+ n = uint32(r0)
+ if n == 0 {
+ return "", e1
+ }
+ return string(utf16.Decode(b[0:n])), nil
+}
diff --git a/vendor/github.com/inconshreveable/go-update/patcher.go b/vendor/github.com/inconshreveable/go-update/patcher.go
new file mode 100644
index 0000000..1da89cb
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/patcher.go
@@ -0,0 +1,24 @@
+package update
+
+import (
+ "io"
+
+ "github.com/inconshreveable/go-update/internal/binarydist"
+)
+
+// Patcher defines an interface for applying binary patches to an old item to get an updated item.
+type Patcher interface {
+ Patch(old io.Reader, new io.Writer, patch io.Reader) error
+}
+
+type patchFn func(io.Reader, io.Writer, io.Reader) error
+
+func (fn patchFn) Patch(old io.Reader, new io.Writer, patch io.Reader) error {
+ return fn(old, new, patch)
+}
+
+// NewBSDiffPatcher returns a new Patcher that applies binary patches using
+// the bsdiff algorithm. See http://www.daemonology.net/bsdiff/
+func NewBSDiffPatcher() Patcher {
+ return patchFn(binarydist.Patch)
+}
diff --git a/vendor/github.com/inconshreveable/go-update/verifier.go b/vendor/github.com/inconshreveable/go-update/verifier.go
new file mode 100644
index 0000000..af1fc57
--- /dev/null
+++ b/vendor/github.com/inconshreveable/go-update/verifier.go
@@ -0,0 +1,74 @@
+package update
+
+import (
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "encoding/asn1"
+ "errors"
+ "math/big"
+)
+
+// Verifier defines an interface for verifying an update's signature with a public key.
+type Verifier interface {
+ VerifySignature(checksum, signature []byte, h crypto.Hash, publicKey crypto.PublicKey) error
+}
+
+type verifyFn func([]byte, []byte, crypto.Hash, crypto.PublicKey) error
+
+func (fn verifyFn) VerifySignature(checksum []byte, signature []byte, hash crypto.Hash, publicKey crypto.PublicKey) error {
+ return fn(checksum, signature, hash, publicKey)
+}
+
+// NewRSAVerifier returns a Verifier that uses the RSA algorithm to verify updates.
+func NewRSAVerifier() Verifier {
+ return verifyFn(func(checksum, signature []byte, hash crypto.Hash, publicKey crypto.PublicKey) error {
+ key, ok := publicKey.(*rsa.PublicKey)
+ if !ok {
+ return errors.New("not a valid RSA public key")
+ }
+ return rsa.VerifyPKCS1v15(key, hash, checksum, signature)
+ })
+}
+
+type rsDER struct {
+ R *big.Int
+ S *big.Int
+}
+
+// NewECDSAVerifier returns a Verifier that uses the ECDSA algorithm to verify updates.
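+//
+// The signature must be an ASN.1 DER-encoded pair of integers (r, s) computed
+// over the checksum of the new file. A signing-side sketch (assumed, for
+// illustration only; privateKey and checksum come from the caller):
+//
+//	r, s, _ := ecdsa.Sign(rand.Reader, privateKey, checksum)
+//	signature, _ := asn1.Marshal(struct{ R, S *big.Int }{r, s})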
+func NewECDSAVerifier() Verifier {
+ return verifyFn(func(checksum, signature []byte, hash crypto.Hash, publicKey crypto.PublicKey) error {
+ key, ok := publicKey.(*ecdsa.PublicKey)
+ if !ok {
+ return errors.New("not a valid ECDSA public key")
+ }
+ var rs rsDER
+ if _, err := asn1.Unmarshal(signature, &rs); err != nil {
+ return err
+ }
+ if !ecdsa.Verify(key, checksum, rs.R, rs.S) {
+ return errors.New("failed to verify ecsda signature")
+ }
+ return nil
+ })
+}
+
+// NewDSAVerifier returns a Verifier that uses the DSA algorithm to verify updates.
+func NewDSAVerifier() Verifier {
+ return verifyFn(func(checksum, signature []byte, hash crypto.Hash, publicKey crypto.PublicKey) error {
+ key, ok := publicKey.(*dsa.PublicKey)
+ if !ok {
+ return errors.New("not a valid DSA public key")
+ }
+ var rs rsDER
+ if _, err := asn1.Unmarshal(signature, &rs); err != nil {
+ return err
+ }
+ if !dsa.Verify(key, checksum, rs.R, rs.S) {
+ return errors.New("failed to verify ecsda signature")
+ }
+ return nil
+ })
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 0000000..5f0d1fb
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2014 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 0000000..7a950d1
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
+# mousetrap
+
+mousetrap is a tiny library that answers a single question.
+
+On a Windows machine, was the process invoked by someone double clicking on
+the executable file while browsing in explorer?
+
+### Motivation
+
+Windows developers unfamiliar with command line tools will often "double-click"
+the executable for a tool. Because most CLI tools print the help and then exit
+when invoked without arguments, this is often very frustrating for those users.
+
+mousetrap provides a way to detect these invocations so that you can provide
+more helpful behavior and instructions on how to run the CLI tool. To see what
+this looks like, both from an organizational and a technical perspective, see
+https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
+
+### The interface
+
+The library exposes a single interface:
+
+ func StartedByExplorer() (bool)
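+
+Below is a hedged usage sketch (an illustration, not part of the upstream
+README): a `main` function that detects the double-click case and keeps the
+console window open long enough to read the hint. The five second pause is an
+arbitrary choice.
+
+```go
+package main
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/inconshreveable/mousetrap"
+)
+
+func main() {
+ if mousetrap.StartedByExplorer() {
+ fmt.Println("Please run this program from a command prompt (cmd.exe).")
+ time.Sleep(5 * time.Second) // give the user time to read the message
+ return
+ }
+ // Normal CLI behavior goes here.
+}
+```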
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
new file mode 100644
index 0000000..9d2d8a4
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package mousetrap
+
+// StartedByExplorer returns true if the program was invoked by the user
+// double-clicking on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only
+// tell you whether it was launched from explorer.exe.
+//
+// On non-Windows platforms, it always returns false.
+func StartedByExplorer() bool {
+ return false
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
new file mode 100644
index 0000000..336142a
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -0,0 +1,98 @@
+// +build windows
+// +build !go1.4
+
+package mousetrap
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ // defined by the Win32 API
+ th32cs_snapprocess uintptr = 0x2
+)
+
+var (
+ kernel = syscall.MustLoadDLL("kernel32.dll")
+ CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
+ Process32First = kernel.MustFindProc("Process32FirstW")
+ Process32Next = kernel.MustFindProc("Process32NextW")
+)
+
+// ProcessEntry32 structure defined by the Win32 API
+type processEntry32 struct {
+ dwSize uint32
+ cntUsage uint32
+ th32ProcessID uint32
+ th32DefaultHeapID int
+ th32ModuleID uint32
+ cntThreads uint32
+ th32ParentProcessID uint32
+ pcPriClassBase int32
+ dwFlags uint32
+ szExeFile [syscall.MAX_PATH]uint16
+}
+
+func getProcessEntry(pid int) (pe *processEntry32, err error) {
+ snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
+ if snapshot == uintptr(syscall.InvalidHandle) {
+ err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
+ return
+ }
+ defer syscall.CloseHandle(syscall.Handle(snapshot))
+
+ var processEntry processEntry32
+ processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
+ ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32First: %v", e1)
+ return
+ }
+
+ for {
+ if processEntry.th32ProcessID == uint32(pid) {
+ pe = &processEntry
+ return
+ }
+
+ ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32Next: %v", e1)
+ return
+ }
+ }
+}
+
+func getppid() (pid int, err error) {
+ pe, err := getProcessEntry(os.Getpid())
+ if err != nil {
+ return
+ }
+
+ pid = int(pe.th32ParentProcessID)
+ return
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only
+// tell you whether it was launched from explorer.exe.
+func StartedByExplorer() bool {
+ ppid, err := getppid()
+ if err != nil {
+ return false
+ }
+
+ pe, err := getProcessEntry(ppid)
+ if err != nil {
+ return false
+ }
+
+ name := syscall.UTF16ToString(pe.szExeFile[:])
+ return name == "explorer.exe"
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
new file mode 100644
index 0000000..9a28e57
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
@@ -0,0 +1,46 @@
+// +build windows
+// +build go1.4
+
+package mousetrap
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+ snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CloseHandle(snapshot)
+ var procEntry syscall.ProcessEntry32
+ procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+ if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+ return nil, err
+ }
+ for {
+ if procEntry.ProcessID == uint32(pid) {
+ return &procEntry, nil
+ }
+ err = syscall.Process32Next(snapshot, &procEntry)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only
+// tell you whether it was launched from explorer.exe.
+func StartedByExplorer() bool {
+ pe, err := getProcessEntry(os.Getppid())
+ if err != nil {
+ return false
+ }
+ return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
+}
diff --git a/vendor/github.com/maruel/panicparse/LICENSE b/vendor/github.com/maruel/panicparse/LICENSE
new file mode 100644
index 0000000..b76840c
--- /dev/null
+++ b/vendor/github.com/maruel/panicparse/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2015 Marc-Antoine Ruel
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/maruel/panicparse/stack/source.go b/vendor/github.com/maruel/panicparse/stack/source.go
new file mode 100644
index 0000000..f09e673
--- /dev/null
+++ b/vendor/github.com/maruel/panicparse/stack/source.go
@@ -0,0 +1,291 @@
+// Copyright 2015 Marc-Antoine Ruel. All rights reserved.
+// Use of this source code is governed under the Apache License, Version 2.0
+// that can be found in the LICENSE file.
+
+// This file contains the code to process sources, to be able to deduce the
+// original types.
+
+package stack
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "log"
+ "math"
+ "strings"
+)
+
+// cache is a cache of sources on the file system.
+type cache struct {
+ files map[string][]byte
+ parsed map[string]*parsedFile
+}
+
+// Augment processes source files to make calls more descriptive.
+//
+// It modifies goroutines in place.
+func Augment(goroutines []Goroutine) {
+ c := &cache{}
+ for i := range goroutines {
+ c.augmentGoroutine(&goroutines[i])
+ }
+}
+
+// augmentGoroutine processes source files to make calls more descriptive.
+//
+// It modifies the routine.
+func (c *cache) augmentGoroutine(goroutine *Goroutine) {
+ if c.files == nil {
+ c.files = map[string][]byte{}
+ }
+ if c.parsed == nil {
+ c.parsed = map[string]*parsedFile{}
+ }
+ // For each call site, look at the next call and populate it. Then we can
+ // walk back and reformat things.
+ for i := range goroutine.Stack.Calls {
+ c.load(goroutine.Stack.Calls[i].SourcePath)
+ }
+
+ // Once all loaded, we can look at the next call when available.
+ for i := 1; i < len(goroutine.Stack.Calls); i++ {
+ // Get the AST from the previous call and process the call line with it.
+ if f := c.getFuncAST(&goroutine.Stack.Calls[i]); f != nil {
+ processCall(&goroutine.Stack.Calls[i], f)
+ }
+ }
+}
+
+// Private stuff.
+
+// load loads a source file and parses the AST tree. Failures are ignored.
+func (c *cache) load(fileName string) {
+ if _, ok := c.parsed[fileName]; ok {
+ return
+ }
+ c.parsed[fileName] = nil
+ if !strings.HasSuffix(fileName, ".go") {
+ // Ignore C and assembly.
+ c.files[fileName] = nil
+ return
+ }
+ log.Printf("load(%s)", fileName)
+ if _, ok := c.files[fileName]; !ok {
+ var err error
+ if c.files[fileName], err = ioutil.ReadFile(fileName); err != nil {
+ log.Printf("Failed to read %s: %s", fileName, err)
+ c.files[fileName] = nil
+ return
+ }
+ }
+ fset := token.NewFileSet()
+ src := c.files[fileName]
+ parsed, err := parser.ParseFile(fset, fileName, src, 0)
+ if err != nil {
+ log.Printf("Failed to parse %s: %s", fileName, err)
+ return
+ }
+ // Convert the line number into raw file offset.
+ offsets := []int{0, 0}
+ start := 0
+ for l := 1; start < len(src); l++ {
+ start += bytes.IndexByte(src[start:], '\n') + 1
+ offsets = append(offsets, start)
+ }
+ c.parsed[fileName] = &parsedFile{offsets, parsed}
+}
+
+func (c *cache) getFuncAST(call *Call) *ast.FuncDecl {
+ if p := c.parsed[call.SourcePath]; p != nil {
+ return p.getFuncAST(call.Func.Name(), call.Line)
+ }
+ return nil
+}
+
+type parsedFile struct {
+ lineToByteOffset []int
+ parsed *ast.File
+}
+
+// getFuncAST gets the callee site function AST representation for the code
+// inside the function f at line l.
+func (p *parsedFile) getFuncAST(f string, l int) (d *ast.FuncDecl) {
+ // Walk the AST to find the lineToByteOffset that fits the line number.
+ var lastFunc *ast.FuncDecl
+ var found ast.Node
+ // Inspect() goes depth first. This means for example that a function like:
+ // func a() {
+ // b := func() {}
+ // c()
+ // }
+ //
+ // Where we are looking at the c() call, Inspect() can return confusing
+ // values. It is important to look at the actual ast.Node hierarchy.
+ ast.Inspect(p.parsed, func(n ast.Node) bool {
+ if d != nil {
+ return false
+ }
+ if n == nil {
+ return true
+ }
+ if found != nil {
+ // We are walking up.
+ }
+ if int(n.Pos()) >= p.lineToByteOffset[l] {
+ // We are expecting an ast.CallExpr node. It can be harder to figure out
+ // when there are multiple calls on a single line, as the stack trace
+ // doesn't have file byte offset information, only line based.
+ // gofmt will always format to one function call per line but there can
+ // be edge cases, like:
+ // a = A{Foo(), Bar()}
+ d = lastFunc
+ //p.processNode(call, n)
+ return false
+ } else if f, ok := n.(*ast.FuncDecl); ok {
+ lastFunc = f
+ }
+ return true
+ })
+ return
+}
+
+func name(n ast.Node) string {
+ if _, ok := n.(*ast.InterfaceType); ok {
+ return "interface{}"
+ }
+ if i, ok := n.(*ast.Ident); ok {
+ return i.Name
+ }
+ if _, ok := n.(*ast.FuncType); ok {
+ return "func"
+ }
+ if s, ok := n.(*ast.SelectorExpr); ok {
+ return s.Sel.Name
+ }
+ // TODO(maruel): Implement anything missing.
+ return ""
+}
+
+// fieldToType returns the type name and whether it's an ellipsis.
+func fieldToType(f *ast.Field) (string, bool) {
+ switch arg := f.Type.(type) {
+ case *ast.ArrayType:
+ return "[]" + name(arg.Elt), false
+ case *ast.Ellipsis:
+ return name(arg.Elt), true
+ case *ast.FuncType:
+ // Do not print the function signature to not overload the trace.
+ return "func", false
+ case *ast.Ident:
+ return arg.Name, false
+ case *ast.InterfaceType:
+ return "interface{}", false
+ case *ast.SelectorExpr:
+ return arg.Sel.Name, false
+ case *ast.StarExpr:
+ return "*" + name(arg.X), false
+ default:
+ // TODO(maruel): Implement anything missing.
+ return "", false
+ }
+}
+
+// extractArgumentsType returns the name of the type of each input argument.
+func extractArgumentsType(f *ast.FuncDecl) ([]string, bool) {
+ var fields []*ast.Field
+ if f.Recv != nil {
+ if len(f.Recv.List) != 1 {
+ panic("Expect only one receiver; please fix panicparse's code")
+ }
+ // If it is an object receiver (vs a pointer receiver), its address is not
+ // printed in the stack trace so it needs to be ignored.
+ if _, ok := f.Recv.List[0].Type.(*ast.StarExpr); ok {
+ fields = append(fields, f.Recv.List[0])
+ }
+ }
+ var types []string
+ extra := false
+ for _, arg := range append(fields, f.Type.Params.List...) {
+ // Assert that extra is only set on the last item of fields?
+ var t string
+ t, extra = fieldToType(arg)
+ mult := len(arg.Names)
+ if mult == 0 {
+ mult = 1
+ }
+ for i := 0; i < mult; i++ {
+ types = append(types, t)
+ }
+ }
+ return types, extra
+}
+
+// processCall walks the function and populates call accordingly.
+func processCall(call *Call, f *ast.FuncDecl) {
+ values := make([]uint64, len(call.Args.Values))
+ for i := range call.Args.Values {
+ values[i] = call.Args.Values[i].Value
+ }
+ index := 0
+ pop := func() uint64 {
+ if len(values) != 0 {
+ x := values[0]
+ values = values[1:]
+ index++
+ return x
+ }
+ return 0
+ }
+ popName := func() string {
+ n := call.Args.Values[index].Name
+ v := pop()
+ if len(n) == 0 {
+ return fmt.Sprintf("0x%x", v)
+ }
+ return n
+ }
+
+ types, extra := extractArgumentsType(f)
+ for i := 0; len(values) != 0; i++ {
+ var t string
+ if i >= len(types) {
+ if !extra {
+ // These are unexpected values! Print them as hex.
+ call.Args.Processed = append(call.Args.Processed, popName())
+ continue
+ }
+ t = types[len(types)-1]
+ } else {
+ t = types[i]
+ }
+ switch t {
+ case "float32":
+ call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%g", math.Float32frombits(uint32(pop()))))
+ case "float64":
+ call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%g", math.Float64frombits(pop())))
+ case "int", "int8", "int16", "int32", "int64", "uint", "uint8", "uint16", "uint32", "uint64":
+ call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%d", pop()))
+ case "string":
+ call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%s(%s, len=%d)", t, popName(), pop()))
+ default:
+ if strings.HasPrefix(t, "*") {
+ call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%s(%s)", t, popName()))
+ } else if strings.HasPrefix(t, "[]") {
+ call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%s(%s len=%d cap=%d)", t, popName(), pop(), pop()))
+ } else {
+ // Assumes it's an interface. For now, discard the object value, which
+ // is probably not a good idea.
+ call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%s(%s)", t, popName()))
+ pop()
+ }
+ }
+ if len(values) == 0 && call.Args.Elided {
+ return
+ }
+ }
+}
diff --git a/vendor/github.com/maruel/panicparse/stack/stack.go b/vendor/github.com/maruel/panicparse/stack/stack.go
new file mode 100644
index 0000000..cfb502e
--- /dev/null
+++ b/vendor/github.com/maruel/panicparse/stack/stack.go
@@ -0,0 +1,832 @@
+// Copyright 2015 Marc-Antoine Ruel. All rights reserved.
+// Use of this source code is governed under the Apache License, Version 2.0
+// that can be found in the LICENSE file.
+
+// Package stack analyzes stack dumps of Go processes and simplifies them.
+//
+// It is mostly useful on servers with a large number of identical goroutines,
+// making the crash dump harder to read than strictly necessary.
+package stack
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net/url"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+const lockedToThread = "locked to thread"
+
+var (
+ // TODO(maruel): Handle corrupted stack cases:
+ // - missed stack barrier
+ // - found next stack barrier at 0x123; expected
+ // - runtime: unexpected return pc for FUNC_NAME called from 0x123
+
+ reRoutineHeader = regexp.MustCompile("^goroutine (\\d+) \\[([^\\]]+)\\]\\:\n$")
+ reMinutes = regexp.MustCompile("^(\\d+) minutes$")
+ reUnavail = regexp.MustCompile("^(?:\t| +)goroutine running on other thread; stack unavailable")
+ // See gentraceback() in src/runtime/traceback.go for more information.
+ // - Sometimes the source file comes up as "<autogenerated>". It is the
+ // compiler that generated these, not the runtime.
+ // - The tab may be replaced with spaces when a user copy-paste it, handle
+ // this transparently.
+ // - "runtime.gopanic" is explicitly replaced with "panic" by gentraceback().
+ // - The +0x123 byte offset is printed when frame.pc > _func.entry. _func is
+ // generated by the linker.
+ // - The +0x123 byte offset is not included with generated code, e.g. unnamed
+ // functions "func·006()" which is generally go func() { ... }()
+ // statements. Since the _func is generated at runtime, it's probably why
+ // _func.entry is not set.
+ // - C calls may have fp=0x123 sp=0x123 appended. I think it normally happens
+ // when a signal is not correctly handled. It is printed with m.throwing>0.
+ // These are discarded.
+ // - For cgo, the source file may be "??".
+ reFile = regexp.MustCompile("^(?:\t| +)(\\?\\?|\\<autogenerated\\>|.+\\.(?:c|go|s))\\:(\\d+)(?:| \\+0x[0-9a-f]+)(?:| fp=0x[0-9a-f]+ sp=0x[0-9a-f]+)\n$")
+ // Sadly, it doesn't note the goroutine number so we could cascade them per
+ // parenthood.
+ reCreated = regexp.MustCompile("^created by (.+)\n$")
+ reFunc = regexp.MustCompile("^(.+)\\((.*)\\)\n$")
+ reElided = regexp.MustCompile("^\\.\\.\\.additional frames elided\\.\\.\\.\n$")
+ // Include frequent GOROOT value on Windows, distro provided and user
+ // installed path. This simplifies the user's life when processing a trace
+ // generated on another VM.
+ // TODO(maruel): Guess the path automatically via traces containing the
+ // 'runtime' package, which is very frequent. This would be "less bad" than
+ // throwing up random values at the parser.
+ goroots = []string{runtime.GOROOT(), "c:/go", "/usr/lib/go", "/usr/local/go"}
+)
+
+// Similarity is the level at which two call lines arguments must match to be
+// considered similar enough to coalesce them.
+type Similarity int
+
+const (
+ // ExactFlags requires same bits (e.g. Locked).
+ ExactFlags Similarity = iota
+ // ExactLines requests the exact same arguments on the call line.
+ ExactLines
+ // AnyPointer considers different pointers a similar call line.
+ AnyPointer
+ // AnyValue accepts any value as similar call line.
+ AnyValue
+)
+
+// Function is a function call.
+//
+// Go stack traces print a mangled function call; this wrapper unmangles the
+// string before printing and adds other filtering methods.
+type Function struct {
+ Raw string
+}
+
+// String is the fully qualified function name.
+//
+// Sadly Go is a bit confused when the package name doesn't match the directory
+// containing the source file and will use the directory name instead of the
+// real package name.
+func (f Function) String() string {
+ s, _ := url.QueryUnescape(f.Raw)
+ return s
+}
+
+// Name is the naked function name.
+func (f Function) Name() string {
+ parts := strings.SplitN(filepath.Base(f.Raw), ".", 2)
+ if len(parts) == 1 {
+ return parts[0]
+ }
+ return parts[1]
+}
+
+// PkgName is the package name for this function reference.
+func (f Function) PkgName() string {
+ parts := strings.SplitN(filepath.Base(f.Raw), ".", 2)
+ if len(parts) == 1 {
+ return ""
+ }
+ s, _ := url.QueryUnescape(parts[0])
+ return s
+}
+
+// PkgDotName returns "<package>.<func>" format.
+func (f Function) PkgDotName() string {
+ parts := strings.SplitN(filepath.Base(f.Raw), ".", 2)
+ s, _ := url.QueryUnescape(parts[0])
+ if len(parts) == 1 {
+ return parts[0]
+ }
+ if s != "" || parts[1] != "" {
+ return s + "." + parts[1]
+ }
+ return ""
+}
+
+// IsExported returns true if the function is exported.
+func (f Function) IsExported() bool {
+ name := f.Name()
+ parts := strings.Split(name, ".")
+ r, _ := utf8.DecodeRuneInString(parts[len(parts)-1])
+ if unicode.ToUpper(r) == r {
+ return true
+ }
+ return f.PkgName() == "main" && name == "main"
+}
+
+// Arg is an argument on a Call.
+type Arg struct {
+ Value uint64 // Value is the raw value as found in the stack trace
+ Name string // Name is a pseudo name given to the argument
+}
+
+// IsPtr returns true if we guess it's a pointer. It's only a guess; it can
+// easily be confused by a bitmask.
+func (a *Arg) IsPtr() bool {
+ // Assumes all pointers are above 16Mb and positive.
+ return a.Value > 16*1024*1024 && a.Value < math.MaxInt64
+}
+
+func (a Arg) String() string {
+ if a.Name != "" {
+ return a.Name
+ }
+ if a.Value == 0 {
+ return "0"
+ }
+ return fmt.Sprintf("0x%x", a.Value)
+}
+
+// Args is a series of function call arguments.
+type Args struct {
+ Values []Arg // Values is the arguments as shown on the stack trace. They are mangled via simplification.
+ Processed []string // Processed is the arguments generated from processing the source files. It can have a length lower than Values.
+ Elided bool // If set, it means there was a trailing ", ..."
+}
+
+func (a Args) String() string {
+ var v []string
+ if len(a.Processed) != 0 {
+ v = make([]string, 0, len(a.Processed))
+ for _, item := range a.Processed {
+ v = append(v, item)
+ }
+ } else {
+ v = make([]string, 0, len(a.Values))
+ for _, item := range a.Values {
+ v = append(v, item.String())
+ }
+ }
+ if a.Elided {
+ v = append(v, "...")
+ }
+ return strings.Join(v, ", ")
+}
+
+// Equal returns true only if both arguments are exactly equal.
+func (a *Args) Equal(r *Args) bool {
+ if a.Elided != r.Elided || len(a.Values) != len(r.Values) {
+ return false
+ }
+ for i, l := range a.Values {
+ if l != r.Values[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// Similar returns true if the two Args are equal or almost but not quite
+// equal.
+func (a *Args) Similar(r *Args, similar Similarity) bool {
+ if a.Elided != r.Elided || len(a.Values) != len(r.Values) {
+ return false
+ }
+ if similar == AnyValue {
+ return true
+ }
+ for i, l := range a.Values {
+ switch similar {
+ case ExactFlags, ExactLines:
+ if l != r.Values[i] {
+ return false
+ }
+ default:
+ if l.IsPtr() != r.Values[i].IsPtr() || (!l.IsPtr() && l != r.Values[i]) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Merge merges two similar Args, zapping out differences.
+func (a *Args) Merge(r *Args) Args {
+ out := Args{
+ Values: make([]Arg, len(a.Values)),
+ Elided: a.Elided,
+ }
+ for i, l := range a.Values {
+ if l != r.Values[i] {
+ out.Values[i].Name = "*"
+ out.Values[i].Value = l.Value
+ } else {
+ out.Values[i] = l
+ }
+ }
+ return out
+}
+
+// Call is an item in the stack trace.
+type Call struct {
+ SourcePath string // Full path name of the source file
+ Line int // Line number
+ Func Function // Fully qualified function name (encoded).
+ Args Args // Call arguments
+}
+
+// Equal returns true only if both calls are exactly equal.
+func (c *Call) Equal(r *Call) bool {
+ return c.SourcePath == r.SourcePath && c.Line == r.Line && c.Func == r.Func && c.Args.Equal(&r.Args)
+}
+
+// Similar returns true if the two Call are equal or almost but not quite
+// equal.
+func (c *Call) Similar(r *Call, similar Similarity) bool {
+ return c.SourcePath == r.SourcePath && c.Line == r.Line && c.Func == r.Func && c.Args.Similar(&r.Args, similar)
+}
+
+// Merge merges two similar Call, zapping out differences.
+func (c *Call) Merge(r *Call) Call {
+ return Call{
+ SourcePath: c.SourcePath,
+ Line: c.Line,
+ Func: c.Func,
+ Args: c.Args.Merge(&r.Args),
+ }
+}
+
+// SourceName returns the base file name of the source file.
+func (c *Call) SourceName() string {
+ return filepath.Base(c.SourcePath)
+}
+
+// SourceLine returns "source.go:line", including only the base file name.
+func (c *Call) SourceLine() string {
+ return fmt.Sprintf("%s:%d", c.SourceName(), c.Line)
+}
+
+// FullSourceLine returns "/path/to/source.go:line".
+func (c *Call) FullSourceLine() string {
+ return fmt.Sprintf("%s:%d", c.SourcePath, c.Line)
+}
+
+// PkgSource is one directory plus the file name of the source file.
+func (c *Call) PkgSource() string {
+ return filepath.Join(filepath.Base(filepath.Dir(c.SourcePath)), c.SourceName())
+}
+
+const testMainSource = "_test" + string(os.PathSeparator) + "_testmain.go"
+
+// IsStdlib returns true if it is a Go standard library function. This includes
+// the 'go test' generated main executable.
+func (c *Call) IsStdlib() bool {
+ for _, goroot := range goroots {
+ if strings.HasPrefix(c.SourcePath, goroot) {
+ return true
+ }
+ }
+ // Consider _test/_testmain.go as stdlib since it's injected by "go test".
+ return c.PkgSource() == testMainSource
+}
+
+// IsPkgMain returns true if it is in the main package.
+func (c *Call) IsPkgMain() bool {
+ return c.Func.PkgName() == "main"
+}
+
+// Stack is a call stack.
+type Stack struct {
+ Calls []Call // Call stack. First is original function, last is leaf function.
+ Elided bool // Happens when there's >100 items in Stack, currently hardcoded in package runtime.
+}
+
+// Equal returns true only if both call stacks are exactly equal.
+func (s *Stack) Equal(r *Stack) bool {
+ if len(s.Calls) != len(r.Calls) || s.Elided != r.Elided {
+ return false
+ }
+ for i := range s.Calls {
+ if !s.Calls[i].Equal(&r.Calls[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Similar returns true if the two Stack are equal or almost but not quite
+// equal.
+func (s *Stack) Similar(r *Stack, similar Similarity) bool {
+ if len(s.Calls) != len(r.Calls) || s.Elided != r.Elided {
+ return false
+ }
+ for i := range s.Calls {
+ if !s.Calls[i].Similar(&r.Calls[i], similar) {
+ return false
+ }
+ }
+ return true
+}
+
+// Merge merges two similar Stack, zapping out differences.
+func (s *Stack) Merge(r *Stack) *Stack {
+ // Assumes similar stacks have the same length.
+ out := &Stack{
+ Calls: make([]Call, len(s.Calls)),
+ Elided: s.Elided,
+ }
+ for i := range s.Calls {
+ out.Calls[i] = s.Calls[i].Merge(&r.Calls[i])
+ }
+ return out
+}
+
+// Less compares two Stack, where the ones that are less are more
+// important, so they come up front. A Stack with more private functions is
+// 'less' so it is at the top. Inversely, a Stack with only public
+// functions is 'more' so it is at the bottom.
+func (s *Stack) Less(r *Stack) bool {
+ lStdlib := 0
+ lPrivate := 0
+ for _, c := range s.Calls {
+ if c.IsStdlib() {
+ lStdlib++
+ } else {
+ lPrivate++
+ }
+ }
+ rStdlib := 0
+ rPrivate := 0
+ for _, s := range r.Calls {
+ if s.IsStdlib() {
+ rStdlib++
+ } else {
+ rPrivate++
+ }
+ }
+ if lPrivate > rPrivate {
+ return true
+ }
+ if lPrivate < rPrivate {
+ return false
+ }
+ if lStdlib > rStdlib {
+ return false
+ }
+ if lStdlib < rStdlib {
+ return true
+ }
+
+ // Stack lengths are the same.
+ for x := range s.Calls {
+ if s.Calls[x].Func.Raw < r.Calls[x].Func.Raw {
+ return true
+ }
+ if s.Calls[x].Func.Raw > r.Calls[x].Func.Raw {
+ return false
+ }
+ if s.Calls[x].PkgSource() < r.Calls[x].PkgSource() {
+ return true
+ }
+ if s.Calls[x].PkgSource() > r.Calls[x].PkgSource() {
+ return false
+ }
+ if s.Calls[x].Line < r.Calls[x].Line {
+ return true
+ }
+ if s.Calls[x].Line > r.Calls[x].Line {
+ return false
+ }
+ }
+ return false
+}
+
+// Signature represents the signature of one or multiple goroutines.
+//
+// It is effectively the stack trace plus the goroutine's internal bits, like
+// its state, whether it is locked to a thread, which call site created this
+// goroutine, etc.
+type Signature struct {
+ // Use git grep 'gopark(|unlock)\(' to find them all plus everything listed
+ // in runtime/traceback.go. Valid values includes:
+ // - chan send, chan receive, select
+ // - finalizer wait, mark wait (idle),
+ // - Concurrent GC wait, GC sweep wait, force gc (idle)
+ // - IO wait, panicwait
+ // - semacquire, semarelease
+ // - sleep, timer goroutine (idle)
+ // - trace reader (blocked)
+ // Stuck cases:
+ // - chan send (nil chan), chan receive (nil chan), select (no cases)
+ // Runnable states:
+ // - idle, runnable, running, syscall, waiting, dead, enqueue, copystack,
+ // Scan states:
+ // - scan, scanrunnable, scanrunning, scansyscall, scanwaiting, scandead,
+ // scanenqueue
+ State string
+ CreatedBy Call // The call site (in another goroutine) that created this one.
+ SleepMin int // Wait time in minutes, if applicable.
+ SleepMax int // Wait time in minutes, if applicable.
+ Stack Stack
+ Locked bool // Locked to an OS thread.
+}
+
+// Equal returns true only if both signatures are exactly equal.
+func (s *Signature) Equal(r *Signature) bool {
+ if s.State != r.State || !s.CreatedBy.Equal(&r.CreatedBy) || s.Locked != r.Locked || s.SleepMin != r.SleepMin || s.SleepMax != r.SleepMax {
+ return false
+ }
+ return s.Stack.Equal(&r.Stack)
+}
+
+// Similar returns true if the two Signature are equal or almost but not quite
+// equal.
+func (s *Signature) Similar(r *Signature, similar Similarity) bool {
+ if s.State != r.State || !s.CreatedBy.Similar(&r.CreatedBy, similar) {
+ return false
+ }
+ if similar == ExactFlags && s.Locked != r.Locked {
+ return false
+ }
+ return s.Stack.Similar(&r.Stack, similar)
+}
+
+// Merge merges two similar Signature, zapping out differences.
+func (s *Signature) Merge(r *Signature) *Signature {
+ min := s.SleepMin
+ if r.SleepMin < min {
+ min = r.SleepMin
+ }
+ max := s.SleepMax
+ if r.SleepMax > max {
+ max = r.SleepMax
+ }
+ return &Signature{
+ State: s.State, // Drop right side.
+ CreatedBy: s.CreatedBy, // Drop right side.
+ SleepMin: min,
+ SleepMax: max,
+ Stack: *s.Stack.Merge(&r.Stack),
+ Locked: s.Locked || r.Locked, // TODO(maruel): This is weird.
+ }
+}
+
+// Less compares two Signature, where the ones that are less are more
+// important, so they come up front. A Signature with more private functions is
+// 'less' so it is at the top. Inversely, a Signature with only public
+// functions is 'more' so it is at the bottom.
+func (s *Signature) Less(r *Signature) bool {
+ if s.Stack.Less(&r.Stack) {
+ return true
+ }
+ if r.Stack.Less(&s.Stack) {
+ return false
+ }
+ if s.Locked && !r.Locked {
+ return true
+ }
+ if r.Locked && !s.Locked {
+ return false
+ }
+ if s.State < r.State {
+ return true
+ }
+ if s.State > r.State {
+ return false
+ }
+ return false
+}
+
+// Goroutine represents the state of one goroutine, including the stack trace.
+type Goroutine struct {
+ Signature // Its stack trace, internal bits, state, which call site created it, etc.
+ ID int // Goroutine ID.
+ First bool // First is the goroutine first printed, normally the one that crashed.
+}
+
+// Bucketize groups similar goroutines into buckets keyed by their signature.
+func Bucketize(goroutines []Goroutine, similar Similarity) map[*Signature][]Goroutine {
+ out := map[*Signature][]Goroutine{}
+ // O(n²). Fix eventually.
+ for _, routine := range goroutines {
+ found := false
+ for key := range out {
+ // When a match is found, this effectively drops the other goroutine ID.
+ if key.Similar(&routine.Signature, similar) {
+ found = true
+ if !key.Equal(&routine.Signature) {
+ // Almost but not quite equal. There are different pointers passed
+ // around but the same values. Zap out the different values.
+ newKey := key.Merge(&routine.Signature)
+ out[newKey] = append(out[key], routine)
+ delete(out, key)
+ } else {
+ out[key] = append(out[key], routine)
+ }
+ break
+ }
+ }
+ if !found {
+ key := &Signature{}
+ *key = routine.Signature
+ out[key] = []Goroutine{routine}
+ }
+ }
+ return out
+}
+
+// Bucket is a stack trace signature and the list of goroutines that fits this
+// signature.
+type Bucket struct {
+ Signature
+ Routines []Goroutine
+}
+
+// First returns true if it contains the first goroutine, i.e. the one that
+// likely generated the panic() call, if any.
+func (b *Bucket) First() bool {
+ for _, r := range b.Routines {
+ if r.First {
+ return true
+ }
+ }
+ return false
+}
+
+// Less does reverse sort.
+func (b *Bucket) Less(r *Bucket) bool {
+ if b.First() {
+ return true
+ }
+ if r.First() {
+ return false
+ }
+ return b.Signature.Less(&r.Signature)
+}
+
+// Buckets is a list of Bucket sorted by repetition count.
+type Buckets []Bucket
+
+func (b Buckets) Len() int {
+ return len(b)
+}
+
+func (b Buckets) Less(i, j int) bool {
+ return b[i].Less(&b[j])
+}
+
+func (b Buckets) Swap(i, j int) {
+ b[j], b[i] = b[i], b[j]
+}
+
+// SortBuckets creates a sorted list of Bucket from the bucketized goroutines.
+func SortBuckets(buckets map[*Signature][]Goroutine) Buckets {
+ out := make(Buckets, 0, len(buckets))
+ for signature, count := range buckets {
+ out = append(out, Bucket{*signature, count})
+ }
+ sort.Sort(out)
+ return out
+}
+
+// scanLines is similar to bufio.ScanLines except that it:
+// - doesn't drop '\n'
+// - doesn't strip '\r'
+// - returns when the data is bufio.MaxScanTokenSize bytes
+func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if atEOF && len(data) == 0 {
+ return 0, nil, nil
+ }
+ if i := bytes.IndexByte(data, '\n'); i >= 0 {
+ return i + 1, data[0 : i+1], nil
+ }
+ if atEOF {
+ return len(data), data, nil
+ }
+ if len(data) >= bufio.MaxScanTokenSize {
+ // Returns the line even if it is not at EOF nor has a '\n', otherwise the
+ // scanner will return bufio.ErrTooLong which is definitely not what we
+ // want.
+ return len(data), data, nil
+ }
+ return 0, nil, nil
+}
+
+// ParseDump processes the output from runtime.Stack().
+//
+// It supports piping from another command and assumes there is junk before the
+// actual stack trace. The junk is streamed to out.
+func ParseDump(r io.Reader, out io.Writer) ([]Goroutine, error) {
+ goroutines := make([]Goroutine, 0, 16)
+ var goroutine *Goroutine
+ scanner := bufio.NewScanner(r)
+ scanner.Split(scanLines)
+ // TODO(maruel): Use a formal state machine. Patterns follow:
+ // - reRoutineHeader
+ // Either:
+ // - reUnavail
+ // - reFunc + reFile in a loop
+ // - reElided
+ // Optionally ends with:
+ // - reCreated + reFile
+ // Between each goroutine stack dump: an empty line
+ created := false
+ // firstLine is the first line after the reRoutineHeader header line.
+ firstLine := false
+ for scanner.Scan() {
+ line := scanner.Text()
+ if line == "\n" {
+ if goroutine != nil {
+ goroutine = nil
+ continue
+ }
+ } else if line[len(line)-1] == '\n' {
+ if goroutine == nil {
+ if match := reRoutineHeader.FindStringSubmatch(line); match != nil {
+ if id, err := strconv.Atoi(match[1]); err == nil {
+ // See runtime/traceback.go.
+ // ", \d+ minutes, locked to thread"
+ items := strings.Split(match[2], ", ")
+ sleep := 0
+ locked := false
+ for i := 1; i < len(items); i++ {
+ if items[i] == lockedToThread {
+ locked = true
+ continue
+ }
+ // Look for duration, if any.
+ if match2 := reMinutes.FindStringSubmatch(items[i]); match2 != nil {
+ sleep, _ = strconv.Atoi(match2[1])
+ }
+ }
+ goroutines = append(goroutines, Goroutine{
+ Signature: Signature{
+ State: items[0],
+ SleepMin: sleep,
+ SleepMax: sleep,
+ Locked: locked,
+ },
+ ID: id,
+ First: len(goroutines) == 0,
+ })
+ goroutine = &goroutines[len(goroutines)-1]
+ firstLine = true
+ continue
+ }
+ }
+ } else {
+ if firstLine {
+ firstLine = false
+ if match := reUnavail.FindStringSubmatch(line); match != nil {
+ // Generate a fake stack entry.
+ goroutine.Stack.Calls = []Call{{SourcePath: "<unavailable>"}}
+ continue
+ }
+ }
+
+ if match := reFile.FindStringSubmatch(line); match != nil {
+ // Triggers after a reFunc or a reCreated.
+ num, err := strconv.Atoi(match[2])
+ if err != nil {
+ return goroutines, fmt.Errorf("failed to parse int on line: \"%s\"", line)
+ }
+ if created {
+ created = false
+ goroutine.CreatedBy.SourcePath = match[1]
+ goroutine.CreatedBy.Line = num
+ } else {
+ i := len(goroutine.Stack.Calls) - 1
+ if i < 0 {
+ return goroutines, errors.New("unexpected order")
+ }
+ goroutine.Stack.Calls[i].SourcePath = match[1]
+ goroutine.Stack.Calls[i].Line = num
+ }
+ continue
+ }
+
+ if match := reCreated.FindStringSubmatch(line); match != nil {
+ created = true
+ goroutine.CreatedBy.Func.Raw = match[1]
+ continue
+ }
+
+ if match := reFunc.FindStringSubmatch(line); match != nil {
+ args := Args{}
+ for _, a := range strings.Split(match[2], ", ") {
+ if a == "..." {
+ args.Elided = true
+ continue
+ }
+ if a == "" {
+ // Remaining values were dropped.
+ break
+ }
+ v, err := strconv.ParseUint(a, 0, 64)
+ if err != nil {
+ return goroutines, fmt.Errorf("failed to parse int on line: \"%s\"", line)
+ }
+ args.Values = append(args.Values, Arg{Value: v})
+ }
+ goroutine.Stack.Calls = append(goroutine.Stack.Calls, Call{Func: Function{match[1]}, Args: args})
+ continue
+ }
+
+ if match := reElided.FindStringSubmatch(line); match != nil {
+ goroutine.Stack.Elided = true
+ continue
+ }
+ }
+ }
+ _, _ = io.WriteString(out, line)
+ goroutine = nil
+ }
+ nameArguments(goroutines)
+ return goroutines, scanner.Err()
+}
+
+// Private stuff.
+
+func nameArguments(goroutines []Goroutine) {
+ // Set a name for any pointer occurring more than once.
+ type object struct {
+ args []*Arg
+ inPrimary bool
+ id int
+ }
+ objects := map[uint64]object{}
+ // Enumerate all the arguments.
+ for i := range goroutines {
+ for j := range goroutines[i].Stack.Calls {
+ for k := range goroutines[i].Stack.Calls[j].Args.Values {
+ arg := goroutines[i].Stack.Calls[j].Args.Values[k]
+ if arg.IsPtr() {
+ objects[arg.Value] = object{
+ args: append(objects[arg.Value].args, &goroutines[i].Stack.Calls[j].Args.Values[k]),
+ inPrimary: objects[arg.Value].inPrimary || i == 0,
+ }
+ }
+ }
+ }
+ // CreatedBy.Args is never set.
+ }
+ order := uint64Slice{}
+ for k, obj := range objects {
+ if len(obj.args) > 1 && obj.inPrimary {
+ order = append(order, k)
+ }
+ }
+ sort.Sort(order)
+ nextID := 1
+ for _, k := range order {
+ for _, arg := range objects[k].args {
+ arg.Name = fmt.Sprintf("#%d", nextID)
+ }
+ nextID++
+ }
+
+ // Now do the rest. This is done so the output is deterministic.
+ order = uint64Slice{}
+ for k := range objects {
+ order = append(order, k)
+ }
+ sort.Sort(order)
+ for _, k := range order {
+ // Process the remaining pointers, they were not referenced by primary
+ // thread so will have higher IDs.
+ if objects[k].inPrimary {
+ continue
+ }
+ for _, arg := range objects[k].args {
+ arg.Name = fmt.Sprintf("#%d", nextID)
+ }
+ nextID++
+ }
+}
+
+type uint64Slice []uint64
+
+func (a uint64Slice) Len() int { return len(a) }
+func (a uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a uint64Slice) Less(i, j int) bool { return a[i] < a[j] }
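+
+// printDumpExample is an illustrative sketch, not part of the upstream file:
+// it strings the pieces of this package together by parsing a stack dump from
+// r, merging similar goroutines and writing a plain-text summary to w with an
+// uncolored Palette. It relies on Augment (source.go) and the Palette helpers
+// (ui.go) added elsewhere in this change.
+func printDumpExample(r io.Reader, w io.Writer) error {
+ goroutines, err := ParseDump(r, w)
+ if err != nil {
+ return err
+ }
+ // Best effort: annotate arguments using local source files when available.
+ Augment(goroutines)
+ buckets := SortBuckets(Bucketize(goroutines, AnyPointer))
+ srcLen, pkgLen := CalcLengths(buckets, false)
+ p := &Palette{}
+ for i := range buckets {
+ if _, err := io.WriteString(w, p.BucketHeader(&buckets[i], false, len(buckets) > 1)); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, p.StackLines(&buckets[i].Signature, srcLen, pkgLen, false)); err != nil {
+ return err
+ }
+ }
+ return nil
+}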
diff --git a/vendor/github.com/maruel/panicparse/stack/ui.go b/vendor/github.com/maruel/panicparse/stack/ui.go
new file mode 100644
index 0000000..b125fc9
--- /dev/null
+++ b/vendor/github.com/maruel/panicparse/stack/ui.go
@@ -0,0 +1,139 @@
+// Copyright 2016 Marc-Antoine Ruel. All rights reserved.
+// Use of this source code is governed under the Apache License, Version 2.0
+// that can be found in the LICENSE file.
+
+package stack
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Palette defines the colors used.
+//
+// An empty object Palette{} can be used to disable coloring.
+type Palette struct {
+ EOLReset string
+
+ // Routine header.
+ RoutineFirst string // The first routine printed.
+ Routine string // Following routines.
+ CreatedBy string
+
+ // Call line.
+ Package string
+ SourceFile string
+ FunctionStdLib string
+ FunctionStdLibExported string
+ FunctionMain string
+ FunctionOther string
+ FunctionOtherExported string
+ Arguments string
+}
+
+// CalcLengths returns the maximum length of the source lines and package names.
+func CalcLengths(buckets Buckets, fullPath bool) (int, int) {
+ srcLen := 0
+ pkgLen := 0
+ for _, bucket := range buckets {
+ for _, line := range bucket.Signature.Stack.Calls {
+ l := 0
+ if fullPath {
+ l = len(line.FullSourceLine())
+ } else {
+ l = len(line.SourceLine())
+ }
+ if l > srcLen {
+ srcLen = l
+ }
+ l = len(line.Func.PkgName())
+ if l > pkgLen {
+ pkgLen = l
+ }
+ }
+ }
+ return srcLen, pkgLen
+}
+
+// functionColor returns the color to be used for the function name based on
+// the type of package the function is in.
+func (p *Palette) functionColor(line *Call) string {
+ if line.IsStdlib() {
+ if line.Func.IsExported() {
+ return p.FunctionStdLibExported
+ }
+ return p.FunctionStdLib
+ } else if line.IsPkgMain() {
+ return p.FunctionMain
+ } else if line.Func.IsExported() {
+ return p.FunctionOtherExported
+ }
+ return p.FunctionOther
+}
+
+// routineColor returns the color for the header of the goroutines bucket.
+func (p *Palette) routineColor(bucket *Bucket, multipleBuckets bool) string {
+ if bucket.First() && multipleBuckets {
+ return p.RoutineFirst
+ }
+ return p.Routine
+}
+
+// BucketHeader prints the header of a goroutine signature.
+func (p *Palette) BucketHeader(bucket *Bucket, fullPath, multipleBuckets bool) string {
+ extra := ""
+ if bucket.SleepMax != 0 {
+ if bucket.SleepMin != bucket.SleepMax {
+ extra += fmt.Sprintf(" [%d~%d minutes]", bucket.SleepMin, bucket.SleepMax)
+ } else {
+ extra += fmt.Sprintf(" [%d minutes]", bucket.SleepMax)
+ }
+ }
+ if bucket.Locked {
+ extra += " [locked]"
+ }
+ created := bucket.CreatedBy.Func.PkgDotName()
+ if created != "" {
+ created += " @ "
+ if fullPath {
+ created += bucket.CreatedBy.FullSourceLine()
+ } else {
+ created += bucket.CreatedBy.SourceLine()
+ }
+ extra += p.CreatedBy + " [Created by " + created + "]"
+ }
+ return fmt.Sprintf(
+ "%s%d: %s%s%s\n",
+ p.routineColor(bucket, multipleBuckets), len(bucket.Routines),
+ bucket.State, extra,
+ p.EOLReset)
+}
+
+// callLine prints one stack line.
+func (p *Palette) callLine(line *Call, srcLen, pkgLen int, fullPath bool) string {
+ src := ""
+ if fullPath {
+ src = line.FullSourceLine()
+ } else {
+ src = line.SourceLine()
+ }
+ return fmt.Sprintf(
+ " %s%-*s %s%-*s %s%s%s(%s)%s",
+ p.Package, pkgLen, line.Func.PkgName(),
+ p.SourceFile, srcLen, src,
+ p.functionColor(line), line.Func.Name(),
+ p.Arguments, line.Args,
+ p.EOLReset)
+}
+
+// StackLines prints one complete stack trace, without the header.
+func (p *Palette) StackLines(signature *Signature, srcLen, pkgLen int, fullPath bool) string {
+ out := make([]string, len(signature.Stack.Calls))
+ for i := range signature.Stack.Calls {
+ out[i] = p.callLine(&signature.Stack.Calls[i], srcLen, pkgLen, fullPath)
+ }
+ if signature.Stack.Elided {
+ out = append(out, " (...)")
+ }
+ return strings.Join(out, "\n") + "\n"
+}
diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
new file mode 100644
index 0000000..2298515
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md
new file mode 100644
index 0000000..60ae311
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/README.md
@@ -0,0 +1,39 @@
+# go-wordwrap
+
+`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that
+automatically wraps words into multiple lines. The primary use case for this
+is in formatting CLI output, but of course word wrapping is a generally useful
+thing to do.
+
+## Installation and Usage
+
+Install using `go get github.com/mitchellh/go-wordwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/mitchellh/go-wordwrap
+
+Below is an example of its usage ignoring errors:
+
+```go
+wrapped := wordwrap.WrapString("foo bar baz", 3)
+fmt.Println(wrapped)
+```
+
+Would output:
+
+```
+foo
+bar
+baz
+```
+
+## Word Wrap Algorithm
+
+This library doesn't use any clever algorithm for word wrapping. The wrapping
+is actually very naive: it simply wraps whenever there is whitespace or an
+explicit linebreak.
+The goal of this library is for word wrapping CLI output, so the input is
+typically pretty well controlled human language. Because of this, the naive
+approach typically works just fine.
+
+In the future, we'd like to make the algorithm more advanced. We would do
+so without breaking the API.
diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
new file mode 100644
index 0000000..ac67205
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
@@ -0,0 +1,73 @@
+package wordwrap
+
+import (
+ "bytes"
+ "unicode"
+)
+
+// WrapString wraps the given string within lim width in characters.
+//
+// Wrapping is currently naive and only happens at white-space. A future
+// version of the library will implement smarter wrapping. This means that
+// pathological cases can dramatically reach past the limit, such as a very
+// long word.
+func WrapString(s string, lim uint) string {
+ // Initialize a buffer with a slightly larger size to account for breaks
+ init := make([]byte, 0, len(s))
+ buf := bytes.NewBuffer(init)
+
+ var current uint
+ var wordBuf, spaceBuf bytes.Buffer
+
+ for _, char := range s {
+ if char == '\n' {
+ if wordBuf.Len() == 0 {
+ if current+uint(spaceBuf.Len()) > lim {
+ current = 0
+ } else {
+ current += uint(spaceBuf.Len())
+ spaceBuf.WriteTo(buf)
+ }
+ spaceBuf.Reset()
+ } else {
+ current += uint(spaceBuf.Len() + wordBuf.Len())
+ spaceBuf.WriteTo(buf)
+ spaceBuf.Reset()
+ wordBuf.WriteTo(buf)
+ wordBuf.Reset()
+ }
+ buf.WriteRune(char)
+ current = 0
+ } else if unicode.IsSpace(char) {
+ if spaceBuf.Len() == 0 || wordBuf.Len() > 0 {
+ current += uint(spaceBuf.Len() + wordBuf.Len())
+ spaceBuf.WriteTo(buf)
+ spaceBuf.Reset()
+ wordBuf.WriteTo(buf)
+ wordBuf.Reset()
+ }
+
+ spaceBuf.WriteRune(char)
+ } else {
+
+ wordBuf.WriteRune(char)
+
+ if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim {
+ buf.WriteRune('\n')
+ current = 0
+ spaceBuf.Reset()
+ }
+ }
+ }
+
+ if wordBuf.Len() == 0 {
+ if current+uint(spaceBuf.Len()) <= lim {
+ spaceBuf.WriteTo(buf)
+ }
+ } else {
+ spaceBuf.WriteTo(buf)
+ wordBuf.WriteTo(buf)
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/mvdan/xurls/LICENSE b/vendor/github.com/mvdan/xurls/LICENSE
new file mode 100644
index 0000000..7d71d51
--- /dev/null
+++ b/vendor/github.com/mvdan/xurls/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2015, Daniel Martí. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of the copyright holder nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/mvdan/xurls/README.md b/vendor/github.com/mvdan/xurls/README.md
new file mode 100644
index 0000000..2051d14
--- /dev/null
+++ b/vendor/github.com/mvdan/xurls/README.md
@@ -0,0 +1,40 @@
+# xurls
+
+[![GoDoc](https://godoc.org/github.com/mvdan/xurls?status.svg)](https://godoc.org/github.com/mvdan/xurls)
+[![Travis](https://travis-ci.org/mvdan/xurls.svg?branch=master)](https://travis-ci.org/mvdan/xurls)
+
+Extract URLs from text using regular expressions.
+
+ go get -u github.com/mvdan/xurls
+
+```go
+import "github.com/mvdan/xurls"
+
+func main() {
+ xurls.Relaxed.FindString("Do gophers live in golang.org?")
+ // "golang.org"
+ xurls.Strict.FindAllString("foo.com is http://foo.com/.", -1)
+ // []string{"http://foo.com/"}
+}
+```
+
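+A fully runnable version of the snippet above, also showing
+`StrictMatchingScheme` to limit matches to a particular scheme (a sketch; the
+last expected output is an illustration rather than documented behavior):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/mvdan/xurls"
+)
+
+func main() {
+	fmt.Println(xurls.Relaxed.FindString("Do gophers live in golang.org?"))
+	// golang.org
+
+	fmt.Println(xurls.Strict.FindAllString("foo.com is http://foo.com/.", -1))
+	// [http://foo.com/]
+
+	// Only match URLs whose scheme is https://.
+	httpsOnly, err := xurls.StrictMatchingScheme("https://")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(httpsOnly.FindAllString("see http://a.example and https://b.example", -1))
+	// [https://b.example]
+}
+```
+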
+`Relaxed` is around five times slower than `Strict` since it does more
+work to find the URLs without relying on the scheme:
+
+```
+BenchmarkStrictEmpty-4 1000000 1885 ns/op
+BenchmarkStrictSingle-4 200000 8356 ns/op
+BenchmarkStrictMany-4 100000 22547 ns/op
+BenchmarkRelaxedEmpty-4 200000 7284 ns/op
+BenchmarkRelaxedSingle-4 30000 58557 ns/op
+BenchmarkRelaxedMany-4 10000 130251 ns/op
+```
+
+#### cmd/xurls
+
+ go get -u github.com/mvdan/xurls/cmd/xurls
+
+```shell
+$ echo "Do gophers live in http://golang.org?" | xurls
+http://golang.org
+```
diff --git a/vendor/github.com/mvdan/xurls/regex.go b/vendor/github.com/mvdan/xurls/regex.go
new file mode 100644
index 0000000..2b4601e
--- /dev/null
+++ b/vendor/github.com/mvdan/xurls/regex.go
@@ -0,0 +1,8 @@
+// Generated by regexgen
+
+package xurls
+
+const (
+ gtld = `(?i)(aaa|aarp|abarth|abb|abbott|abbvie|abc|able|abogado|abudhabi|ac|academy|accenture|accountant|accountants|aco|active|actor|ad|adac|ads|adult|ae|aeg|aero|aetna|af|afamilycompany|afl|africa|ag|agakhan|agency|ai|aig|aigo|airbus|airforce|airtel|akdn|al|alfaromeo|alibaba|alipay|allfinanz|allstate|ally|alsace|alstom|am|americanexpress|americanfamily|amex|amfam|amica|amsterdam|analytics|android|anquan|anz|ao|aol|apartments|app|apple|aq|aquarelle|ar|arab|aramco|archi|army|arpa|art|arte|as|asda|asia|associates|at|athleta|attorney|au|auction|audi|audible|audio|auspost|author|auto|autos|avianca|aw|aws|ax|axa|az|azure|ba|baby|baidu|banamex|bananarepublic|band|bank|bar|barcelona|barclaycard|barclays|barefoot|bargains|baseball|basketball|bauhaus|bayern|bb|bbc|bbt|bbva|bcg|bcn|bd|be|beats|beauty|beer|bentley|berlin|best|bestbuy|bet|bf|bg|bh|bharti|bi|bible|bid|bike|bing|bingo|bio|bit|biz|bj|black|blackfriday|blanco|blockbuster|blog|bloomberg|blue|bm|bms|bmw|bn|bnl|bnpparibas|bo|boats|boehringer|bofa|bom|bond|boo|book|booking|boots|bosch|bostik|boston|bot|boutique|box|br|bradesco|bridgestone|broadway|broker|brother|brussels|bs|bt|budapest|bugatti|build|builders|business|buy|buzz|bv|bw|by|bz|bzh|ca|cab|cafe|cal|call|calvinklein|cam|camera|camp|cancerresearch|canon|capetown|capital|capitalone|car|caravan|cards|care|career|careers|cars|cartier|casa|case|caseih|cash|casino|cat|catering|catholic|cba|cbn|cbre|cbs|cc|cd|ceb|center|ceo|cern|cf|cfa|cfd|cg|ch|chanel|channel|chase|chat|cheap|chintai|chloe|christmas|chrome|chrysler|church|ci|cipriani|circle|cisco|citadel|citi|citic|city|cityeats|ck|cl|claims|cleaning|click|clinic|clinique|clothing|cloud|club|clubmed|cm|cn|co|coach|codes|coffee|college|cologne|com|comcast|commbank|community|company|compare|computer|comsec|condos|construction|consulting|contact|contractors|cooking|cookingchannel|cool|coop|corsica|country|coupon|coupons|courses|cr|credit|creditcard|creditunion|cricket|crown|crs|cruise|cruises|csc|cu|cuisinella|cv|cw|cx|cy|cymru|cyou|cz|dabur|dad|dance|data|date|dating|datsun|day|dclk|dds|de|deal|dealer|deals|degree|delivery|dell|deloitte|delta|democrat|dental|dentist|desi|design|dev|dhl|diamonds|diet|digital|direct|directory|discount|discover|dish|diy|dj|dk|dm|dnp|do|docs|doctor|dodge|dog|doha|domains|dot|download|drive|dtv|dubai|duck|dunlop|duns|dupont|durban|dvag|dvr|dwg|dz|earth|eat|ec|eco|edeka|edu|education|ee|eg|email|emerck|energy|engineer|engineering|enterprises|epost|epson|equipment|er|ericsson|erni|es|esq|estate|esurance|et|etisalat|eu|eurovision|eus|events|everbank|example|exchange|exit|expert|exposed|express|extraspace|fage|fail|fairwinds|faith|family|fan|fans|farm|farmers|fashion|fast|fedex|feedback|ferrari|ferrero|fi|fiat|fidelity|fido|film|final|finance|financial|fire|firestone|firmdale|fish|fishing|fit|fitness|fj|fk|flickr|flights|flir|florist|flowers|fly|fm|fo|foo|food|foodnetwork|football|ford|forex|forsale|forum|foundation|fox|fr|free|fresenius|frl|frogans|frontdoor|frontier|ftr|fujitsu|fujixerox|fun|fund|furniture|futbol|fyi|ga|gal|gallery|gallo|gallup|game|games|gap|garden|gb|gbiz|gd|gdn|ge|gea|gent|genting|george|gf|gg|ggee|gh|gi|gift|gifts|gives|giving|gl|glade|glass|gle|global|globo|gm|gmail|gmbh|gmo|gmx|gn|gnu|godaddy|gold|goldpoint|golf|goo|goodhands|goodyear|goog|google|gop|got|gov|gp|gq|gr|grainger|graphics|gratis|green|gripe|grocery|group|gs|gt|gu|guardian|gucci|guge|guide|guitars|guru|gw|gy|hair|hamburg|hangout|haus|hbo|hdfc|hdfcbank|health|healthcare|help|helsinki|here|hermes|hgtv|hiphop|hisamitsu|hitachi|hiv|h
k|hkt|hm|hn|hockey|holdings|holiday|homedepot|homegoods|homes|homesense|honda|honeywell|horse|hospital|host|hosting|hot|hoteles|hotels|hotmail|house|how|hr|hsbc|ht|htc|hu|hughes|hyatt|hyundai|i2p|ibm|icbc|ice|icu|id|ie|ieee|ifm|iinet|ikano|il|im|imamat|imdb|immo|immobilien|in|industries|infiniti|info|ing|ink|institute|insurance|insure|int|intel|international|intuit|invalid|investments|io|ipiranga|iq|ir|irish|is|iselect|ismaili|ist|istanbul|it|itau|itv|iveco|iwc|jaguar|java|jcb|jcp|je|jeep|jetzt|jewelry|jio|jlc|jll|jm|jmp|jnj|jo|jobs|joburg|jot|joy|jp|jpmorgan|jprs|juegos|juniper|kaufen|kddi|ke|kerryhotels|kerrylogistics|kerryproperties|kfh|kg|kh|ki|kia|kim|kinder|kindle|kitchen|kiwi|km|kn|koeln|komatsu|kosher|kp|kpmg|kpn|kr|krd|kred|kuokgroup|kw|ky|kyoto|kz|la|lacaixa|ladbrokes|lamborghini|lamer|lancaster|lancia|lancome|land|landrover|lanxess|lasalle|lat|latino|latrobe|law|lawyer|lb|lc|lds|lease|leclerc|lefrak|legal|lego|lexus|lgbt|li|liaison|lidl|life|lifeinsurance|lifestyle|lighting|like|lilly|limited|limo|lincoln|linde|link|lipsy|live|living|lixil|lk|loan|loans|local|localhost|locker|locus|loft|lol|london|lotte|lotto|love|lpl|lplfinancial|lr|ls|lt|ltd|ltda|lu|lundbeck|lupin|luxe|luxury|lv|ly|ma|macys|madrid|maif|maison|makeup|man|management|mango|map|market|marketing|markets|marriott|marshalls|maserati|mattel|mba|mc|mcd|mcdonalds|mckinsey|md|me|med|media|meet|melbourne|meme|memorial|men|menu|meo|merckmsd|metlife|mg|mh|miami|microsoft|mil|mini|mint|mit|mitsubishi|mk|ml|mlb|mls|mm|mma|mn|mo|mobi|mobile|mobily|moda|moe|moi|mom|monash|money|monster|montblanc|mopar|mormon|mortgage|moscow|moto|motorcycles|mov|movie|movistar|mp|mq|mr|ms|msd|mt|mtn|mtpc|mtr|mu|museum|mutual|mutuelle|mv|mw|mx|my|mz|na|nab|nadex|nagoya|name|nationwide|natura|navy|nba|nc|ne|nec|net|netbank|netflix|network|neustar|new|newholland|news|next|nextdirect|nexus|nf|nfl|ng|ngo|nhk|ni|nico|nike|nikon|ninja|nissan|nissay|nl|no|nokia|northwesternmutual|norton|now|nowruz|nowtv|np|nr|nra|nrw|ntt|nu|nyc|nz|obi|observer|off|office|okinawa|olayan|olayangroup|oldnavy|ollo|om|omega|one|ong|onion|onl|online|onyourside|ooo|open|oracle|orange|org|organic|orientexpress|origins|osaka|otsuka|ott|ovh|pa|page|pamperedchef|panasonic|panerai|paris|pars|partners|parts|party|passagens|pay|pccw|pe|pet|pf|pfizer|pg|ph|pharmacy|phd|philips|phone|photo|photography|photos|physio|piaget|pics|pictet|pictures|pid|pin|ping|pink|pioneer|pizza|pk|pl|place|play|playstation|plumbing|plus|pm|pn|pnc|pohl|poker|politie|porn|post|pr|pramerica|praxi|press|prime|pro|prod|productions|prof|progressive|promo|properties|property|protection|pru|prudential|ps|pt|pub|pw|pwc|py|qa|qpon|quebec|quest|qvc|racing|radio|raid|re|read|realestate|realtor|realty|recipes|red|redstone|redumbrella|rehab|reise|reisen|reit|reliance|ren|rent|rentals|repair|report|republican|rest|restaurant|review|reviews|rexroth|rich|richardli|ricoh|rightathome|ril|rio|rip|rmit|ro|rocher|rocks|rodeo|rogers|room|rs|rsvp|ru|ruhr|run|rw|rwe|ryukyu|sa|saarland|safe|safety|sakura|sale|salon|samsclub|samsung|sandvik|sandvikcoromant|sanofi|sap|sapo|sarl|sas|save|saxo|sb|sbi|sbs|sc|sca|scb|schaeffler|schmidt|scholarships|school|schule|schwarz|science|scjohnson|scor|scot|sd|se|search|seat|secure|security|seek|select|sener|services|ses|seven|sew|sex|sexy|sfr|sg|sh|shangrila|sharp|shaw|shell|shia|shiksha|shoes|shop|shopping|shouji|show|showtime|shriram|si|silk|sina|singles|site|sj|sk|ski|skin|sky|skype|sl|sling|sm|smart|smile|sn|sncf|so|soccer|social|softbank|software|sohu|solar|solutions|song|sony|soy|space|spieg
el|spot|spreadbetting|sr|srl|srt|st|stada|staples|star|starhub|statebank|statefarm|statoil|stc|stcgroup|stockholm|storage|store|stream|studio|study|style|su|sucks|supplies|supply|support|surf|surgery|suzuki|sv|swatch|swiftcover|swiss|sx|sy|sydney|symantec|systems|sz|tab|taipei|talk|taobao|target|tatamotors|tatar|tattoo|tax|taxi|tc|tci|td|tdk|team|tech|technology|tel|telecity|telefonica|temasek|tennis|test|teva|tf|tg|th|thd|theater|theatre|theguardian|tiaa|tickets|tienda|tiffany|tips|tires|tirol|tj|tjmaxx|tjx|tk|tkmaxx|tl|tm|tmall|tn|to|today|tokyo|tools|top|toray|toshiba|total|tours|town|toyota|toys|tr|trade|trading|training|travel|travelchannel|travelers|travelersinsurance|trust|trv|tt|tube|tui|tunes|tushu|tv|tvs|tw|tz|ua|ubank|ubs|uconnect|ug|uk|unicom|university|uno|uol|ups|us|uy|uz|va|vacations|vana|vanguard|vc|ve|vegas|ventures|verisign|vermögensberater|vermögensberatung|versicherung|vet|vg|vi|viajes|video|vig|viking|villas|vin|vip|virgin|visa|vision|vista|vistaprint|viva|vivo|vlaanderen|vn|vodka|volkswagen|volvo|vote|voting|voto|voyage|vu|vuelos|wales|walmart|walter|wang|wanggou|warman|watch|watches|weather|weatherchannel|webcam|weber|website|wed|wedding|weibo|weir|wf|whoswho|wien|wiki|williamhill|win|windows|wine|winners|wme|wolterskluwer|woodside|work|works|world|wow|ws|wtc|wtf|xbox|xerox|xfinity|xihuan|xin|xn--11b4c3d|xn--1ck2e1b|xn--1qqw23a|xn--30rr7y|xn--3bst00m|xn--3ds443g|xn--3e0b707e|xn--3oq18vl8pn36a|xn--3pxu8k|xn--42c2d9a|xn--45brj9c|xn--45q11c|xn--4gbrim|xn--4gq48lf9j|xn--54b7fta0cc|xn--55qw42g|xn--55qx5d|xn--5su34j936bgsg|xn--5tzm5g|xn--6frz82g|xn--6qq986b3xl|xn--80adxhks|xn--80ao21a|xn--80aqecdr1a|xn--80asehdb|xn--80aswg|xn--8y0a063a|xn--90a3ac|xn--90ais|xn--9dbq2a|xn--9et52u|xn--9krt00a|xn--b4w605ferd|xn--bck1b9a5dre4c|xn--c1avg|xn--c2br7g|xn--cck2b3b|xn--cg4bki|xn--clchc0ea0b2g2a9gcd|xn--czr694b|xn--czrs0t|xn--czru2d|xn--d1acj3b|xn--d1alf|xn--e1a4c|xn--eckvdtc9d|xn--efvy88h|xn--estv75g|xn--fct429k|xn--fhbei|xn--fiq228c5hs|xn--fiq64b|xn--fiqs8s|xn--fiqz9s|xn--fjq720a|xn--flw351e|xn--fpcrj9c3d|xn--fzc2c9e2c|xn--fzys8d69uvgm|xn--g2xx48c|xn--gckr3f0f|xn--gecrj9c|xn--gk3at1e|xn--h2brj9c|xn--hxt814e|xn--i1b6b1a6a2e|xn--imr513n|xn--io0a7i|xn--j1aef|xn--j1amh|xn--j6w193g|xn--jlq61u9w7b|xn--jvr189m|xn--kcrx77d1x4a|xn--kprw13d|xn--kpry57d|xn--kpu716f|xn--kput3i|xn--l1acc|xn--lgbbat1ad8j|xn--mgb2ddes|xn--mgb9awbf|xn--mgba3a3ejt|xn--mgba3a4f16a|xn--mgba3a4fra|xn--mgba7c0bbn0a|xn--mgbaakc7dvf|xn--mgbaam7a8h|xn--mgbab2bd|xn--mgbai9a5eva00b|xn--mgbai9azgqp6j|xn--mgbayh7gpa|xn--mgbb9fbpob|xn--mgbbh1a71e|xn--mgbc0a9azcg|xn--mgbca7dzdo|xn--mgberp4a5d4a87g|xn--mgberp4a5d4ar|xn--mgbi4ecexp|xn--mgbpl2fh|xn--mgbqly7c0a67fbc|xn--mgbqly7cvafr|xn--mgbt3dhd|xn--mgbtf8fl|xn--mgbtx2b|xn--mgbx4cd0ab|xn--mix082f|xn--mix891f|xn--mk1bu44c|xn--mxtq1m|xn--ngbc5azd|xn--ngbe9e0a|xn--ngbrx|xn--nnx388a|xn--node|xn--nqv7f|xn--nqv7fs00ema|xn--nyqy26a|xn--o3cw4h|xn--ogbpf8fl|xn--p1acf|xn--p1ai|xn--pbt977c|xn--pgbs0dh|xn--pssy2u|xn--q9jyb4c|xn--qcka1pmc|xn--qxam|xn--rhqv96g|xn--rovu88b|xn--s9brj9c|xn--ses554g|xn--t60b56a|xn--tckwe|xn--tiq49xqyj|xn--unup4y|xn--vermgensberater-ctb|xn--vermgensberatung-pwb|xn--vhquv|xn--vuq861b|xn--w4r85el8fhu5dnra|xn--w4rs40l|xn--wgbh1c|xn--wgbl6a|xn--xhq521b|xn--xkc2al3hye2a|xn--xkc2dl3a5ee0h|xn--y9a3aq|xn--yfro4i67o|xn--ygbi2ammx|xn--zfr164b|xperia|xxx|xyz|yachts|yahoo|yamaxun|yandex|ye|yodobashi|yoga|yokohama|you|youtube|yt|yun|za|zappos|zara|zero|zip|zippo|zkey|zm|zone|zuerich|zw|ελ|бел|дети|ею|католик|ком|мкд|мон|москва|онлайн|орг|рус|рф|сайт|срб|укр|қаз|հայ|קום|ابوظبي|اتصال
ات|ارامكو|الاردن|الجزائر|السعودية|السعوديه|السعودیة|السعودیۃ|العليان|المغرب|اليمن|امارات|ايران|ایران|بازار|بيتك|بھارت|تونس|سودان|سوريا|سورية|شبكة|عراق|عرب|عمان|فلسطين|قطر|كاثوليك|كوم|مصر|مليسيا|موبايلي|موقع|همراه|پاكستان|پاکستان|कॉम|नेट|भारत|संगठन|বাংলা|ভারত|ਭਾਰਤ|ભારત|இந்தியா|இலங்கை|சிங்கப்பூர்|భారత్|ලංකා|คอม|ไทย|გე|みんな|クラウド|グーグル|コム|ストア|セール|ファッション|ポイント|一号店|世界|中信|中国|中國|中文网|企业|佛山|信息|健康|八卦|公司|公益|台湾|台灣|商城|商店|商标|嘉里|嘉里大酒店|在线|大众汽车|大拿|天主教|娱乐|家電|工行|广东|微博|慈善|我爱你|手机|手表|政务|政府|新加坡|新闻|时尚|書籍|机构|淡马锡|游戏|澳門|澳门|点看|珠宝|移动|组织机构|网址|网店|网站|网络|联通|臺灣|诺基亚|谷歌|购物|通販|集团|電訊盈科|飞利浦|食品|餐厅|香格里拉|香港|닷넷|닷컴|삼성|한국)(?-i)`
+ otherScheme = `(?i)(bitcoin|file|magnet|mailto|sms|tel|xmpp)(?-i):`
+)
diff --git a/vendor/github.com/mvdan/xurls/schemes.go b/vendor/github.com/mvdan/xurls/schemes.go
new file mode 100644
index 0000000..84767f0
--- /dev/null
+++ b/vendor/github.com/mvdan/xurls/schemes.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2015, Daniel Martí
+// See LICENSE for licensing information
+
+package xurls
+
+// SchemesNoAuthority is a sorted list of some well-known url schemes that are
+// followed by ":" instead of "://". Since these are more prone to false
+// positives, we limit their matching.
+var SchemesNoAuthority = []string{
+ `bitcoin`, // Bitcoin
+ `file`, // Files
+ `magnet`, // Torrent magnets
+ `mailto`, // Mail
+ `sms`, // SMS
+ `tel`, // Telephone
+ `xmpp`, // XMPP
+}
diff --git a/vendor/github.com/mvdan/xurls/tlds.go b/vendor/github.com/mvdan/xurls/tlds.go
new file mode 100644
index 0000000..946ff9e
--- /dev/null
+++ b/vendor/github.com/mvdan/xurls/tlds.go
@@ -0,0 +1,1564 @@
+// Generated by tldsgen
+
+package xurls
+
+// TLDs is a sorted list of all public top-level domains.
+//
+// Sources:
+// * https://data.iana.org/TLD/tlds-alpha-by-domain.txt
+// * https://publicsuffix.org/list/effective_tld_names.dat
+var TLDs = []string{
+ `aaa`,
+ `aarp`,
+ `abarth`,
+ `abb`,
+ `abbott`,
+ `abbvie`,
+ `abc`,
+ `able`,
+ `abogado`,
+ `abudhabi`,
+ `ac`,
+ `academy`,
+ `accenture`,
+ `accountant`,
+ `accountants`,
+ `aco`,
+ `active`,
+ `actor`,
+ `ad`,
+ `adac`,
+ `ads`,
+ `adult`,
+ `ae`,
+ `aeg`,
+ `aero`,
+ `aetna`,
+ `af`,
+ `afamilycompany`,
+ `afl`,
+ `africa`,
+ `ag`,
+ `agakhan`,
+ `agency`,
+ `ai`,
+ `aig`,
+ `aigo`,
+ `airbus`,
+ `airforce`,
+ `airtel`,
+ `akdn`,
+ `al`,
+ `alfaromeo`,
+ `alibaba`,
+ `alipay`,
+ `allfinanz`,
+ `allstate`,
+ `ally`,
+ `alsace`,
+ `alstom`,
+ `am`,
+ `americanexpress`,
+ `americanfamily`,
+ `amex`,
+ `amfam`,
+ `amica`,
+ `amsterdam`,
+ `analytics`,
+ `android`,
+ `anquan`,
+ `anz`,
+ `ao`,
+ `aol`,
+ `apartments`,
+ `app`,
+ `apple`,
+ `aq`,
+ `aquarelle`,
+ `ar`,
+ `arab`,
+ `aramco`,
+ `archi`,
+ `army`,
+ `arpa`,
+ `art`,
+ `arte`,
+ `as`,
+ `asda`,
+ `asia`,
+ `associates`,
+ `at`,
+ `athleta`,
+ `attorney`,
+ `au`,
+ `auction`,
+ `audi`,
+ `audible`,
+ `audio`,
+ `auspost`,
+ `author`,
+ `auto`,
+ `autos`,
+ `avianca`,
+ `aw`,
+ `aws`,
+ `ax`,
+ `axa`,
+ `az`,
+ `azure`,
+ `ba`,
+ `baby`,
+ `baidu`,
+ `banamex`,
+ `bananarepublic`,
+ `band`,
+ `bank`,
+ `bar`,
+ `barcelona`,
+ `barclaycard`,
+ `barclays`,
+ `barefoot`,
+ `bargains`,
+ `baseball`,
+ `basketball`,
+ `bauhaus`,
+ `bayern`,
+ `bb`,
+ `bbc`,
+ `bbt`,
+ `bbva`,
+ `bcg`,
+ `bcn`,
+ `bd`,
+ `be`,
+ `beats`,
+ `beauty`,
+ `beer`,
+ `bentley`,
+ `berlin`,
+ `best`,
+ `bestbuy`,
+ `bet`,
+ `bf`,
+ `bg`,
+ `bh`,
+ `bharti`,
+ `bi`,
+ `bible`,
+ `bid`,
+ `bike`,
+ `bing`,
+ `bingo`,
+ `bio`,
+ `biz`,
+ `bj`,
+ `black`,
+ `blackfriday`,
+ `blanco`,
+ `blockbuster`,
+ `blog`,
+ `bloomberg`,
+ `blue`,
+ `bm`,
+ `bms`,
+ `bmw`,
+ `bn`,
+ `bnl`,
+ `bnpparibas`,
+ `bo`,
+ `boats`,
+ `boehringer`,
+ `bofa`,
+ `bom`,
+ `bond`,
+ `boo`,
+ `book`,
+ `booking`,
+ `boots`,
+ `bosch`,
+ `bostik`,
+ `boston`,
+ `bot`,
+ `boutique`,
+ `box`,
+ `br`,
+ `bradesco`,
+ `bridgestone`,
+ `broadway`,
+ `broker`,
+ `brother`,
+ `brussels`,
+ `bs`,
+ `bt`,
+ `budapest`,
+ `bugatti`,
+ `build`,
+ `builders`,
+ `business`,
+ `buy`,
+ `buzz`,
+ `bv`,
+ `bw`,
+ `by`,
+ `bz`,
+ `bzh`,
+ `ca`,
+ `cab`,
+ `cafe`,
+ `cal`,
+ `call`,
+ `calvinklein`,
+ `cam`,
+ `camera`,
+ `camp`,
+ `cancerresearch`,
+ `canon`,
+ `capetown`,
+ `capital`,
+ `capitalone`,
+ `car`,
+ `caravan`,
+ `cards`,
+ `care`,
+ `career`,
+ `careers`,
+ `cars`,
+ `cartier`,
+ `casa`,
+ `case`,
+ `caseih`,
+ `cash`,
+ `casino`,
+ `cat`,
+ `catering`,
+ `catholic`,
+ `cba`,
+ `cbn`,
+ `cbre`,
+ `cbs`,
+ `cc`,
+ `cd`,
+ `ceb`,
+ `center`,
+ `ceo`,
+ `cern`,
+ `cf`,
+ `cfa`,
+ `cfd`,
+ `cg`,
+ `ch`,
+ `chanel`,
+ `channel`,
+ `chase`,
+ `chat`,
+ `cheap`,
+ `chintai`,
+ `chloe`,
+ `christmas`,
+ `chrome`,
+ `chrysler`,
+ `church`,
+ `ci`,
+ `cipriani`,
+ `circle`,
+ `cisco`,
+ `citadel`,
+ `citi`,
+ `citic`,
+ `city`,
+ `cityeats`,
+ `ck`,
+ `cl`,
+ `claims`,
+ `cleaning`,
+ `click`,
+ `clinic`,
+ `clinique`,
+ `clothing`,
+ `cloud`,
+ `club`,
+ `clubmed`,
+ `cm`,
+ `cn`,
+ `co`,
+ `coach`,
+ `codes`,
+ `coffee`,
+ `college`,
+ `cologne`,
+ `com`,
+ `comcast`,
+ `commbank`,
+ `community`,
+ `company`,
+ `compare`,
+ `computer`,
+ `comsec`,
+ `condos`,
+ `construction`,
+ `consulting`,
+ `contact`,
+ `contractors`,
+ `cooking`,
+ `cookingchannel`,
+ `cool`,
+ `coop`,
+ `corsica`,
+ `country`,
+ `coupon`,
+ `coupons`,
+ `courses`,
+ `cr`,
+ `credit`,
+ `creditcard`,
+ `creditunion`,
+ `cricket`,
+ `crown`,
+ `crs`,
+ `cruise`,
+ `cruises`,
+ `csc`,
+ `cu`,
+ `cuisinella`,
+ `cv`,
+ `cw`,
+ `cx`,
+ `cy`,
+ `cymru`,
+ `cyou`,
+ `cz`,
+ `dabur`,
+ `dad`,
+ `dance`,
+ `data`,
+ `date`,
+ `dating`,
+ `datsun`,
+ `day`,
+ `dclk`,
+ `dds`,
+ `de`,
+ `deal`,
+ `dealer`,
+ `deals`,
+ `degree`,
+ `delivery`,
+ `dell`,
+ `deloitte`,
+ `delta`,
+ `democrat`,
+ `dental`,
+ `dentist`,
+ `desi`,
+ `design`,
+ `dev`,
+ `dhl`,
+ `diamonds`,
+ `diet`,
+ `digital`,
+ `direct`,
+ `directory`,
+ `discount`,
+ `discover`,
+ `dish`,
+ `diy`,
+ `dj`,
+ `dk`,
+ `dm`,
+ `dnp`,
+ `do`,
+ `docs`,
+ `doctor`,
+ `dodge`,
+ `dog`,
+ `doha`,
+ `domains`,
+ `dot`,
+ `download`,
+ `drive`,
+ `dtv`,
+ `dubai`,
+ `duck`,
+ `dunlop`,
+ `duns`,
+ `dupont`,
+ `durban`,
+ `dvag`,
+ `dvr`,
+ `dwg`,
+ `dz`,
+ `earth`,
+ `eat`,
+ `ec`,
+ `eco`,
+ `edeka`,
+ `edu`,
+ `education`,
+ `ee`,
+ `eg`,
+ `email`,
+ `emerck`,
+ `energy`,
+ `engineer`,
+ `engineering`,
+ `enterprises`,
+ `epost`,
+ `epson`,
+ `equipment`,
+ `er`,
+ `ericsson`,
+ `erni`,
+ `es`,
+ `esq`,
+ `estate`,
+ `esurance`,
+ `et`,
+ `etisalat`,
+ `eu`,
+ `eurovision`,
+ `eus`,
+ `events`,
+ `everbank`,
+ `exchange`,
+ `expert`,
+ `exposed`,
+ `express`,
+ `extraspace`,
+ `fage`,
+ `fail`,
+ `fairwinds`,
+ `faith`,
+ `family`,
+ `fan`,
+ `fans`,
+ `farm`,
+ `farmers`,
+ `fashion`,
+ `fast`,
+ `fedex`,
+ `feedback`,
+ `ferrari`,
+ `ferrero`,
+ `fi`,
+ `fiat`,
+ `fidelity`,
+ `fido`,
+ `film`,
+ `final`,
+ `finance`,
+ `financial`,
+ `fire`,
+ `firestone`,
+ `firmdale`,
+ `fish`,
+ `fishing`,
+ `fit`,
+ `fitness`,
+ `fj`,
+ `fk`,
+ `flickr`,
+ `flights`,
+ `flir`,
+ `florist`,
+ `flowers`,
+ `fly`,
+ `fm`,
+ `fo`,
+ `foo`,
+ `food`,
+ `foodnetwork`,
+ `football`,
+ `ford`,
+ `forex`,
+ `forsale`,
+ `forum`,
+ `foundation`,
+ `fox`,
+ `fr`,
+ `free`,
+ `fresenius`,
+ `frl`,
+ `frogans`,
+ `frontdoor`,
+ `frontier`,
+ `ftr`,
+ `fujitsu`,
+ `fujixerox`,
+ `fun`,
+ `fund`,
+ `furniture`,
+ `futbol`,
+ `fyi`,
+ `ga`,
+ `gal`,
+ `gallery`,
+ `gallo`,
+ `gallup`,
+ `game`,
+ `games`,
+ `gap`,
+ `garden`,
+ `gb`,
+ `gbiz`,
+ `gd`,
+ `gdn`,
+ `ge`,
+ `gea`,
+ `gent`,
+ `genting`,
+ `george`,
+ `gf`,
+ `gg`,
+ `ggee`,
+ `gh`,
+ `gi`,
+ `gift`,
+ `gifts`,
+ `gives`,
+ `giving`,
+ `gl`,
+ `glade`,
+ `glass`,
+ `gle`,
+ `global`,
+ `globo`,
+ `gm`,
+ `gmail`,
+ `gmbh`,
+ `gmo`,
+ `gmx`,
+ `gn`,
+ `godaddy`,
+ `gold`,
+ `goldpoint`,
+ `golf`,
+ `goo`,
+ `goodhands`,
+ `goodyear`,
+ `goog`,
+ `google`,
+ `gop`,
+ `got`,
+ `gov`,
+ `gp`,
+ `gq`,
+ `gr`,
+ `grainger`,
+ `graphics`,
+ `gratis`,
+ `green`,
+ `gripe`,
+ `grocery`,
+ `group`,
+ `gs`,
+ `gt`,
+ `gu`,
+ `guardian`,
+ `gucci`,
+ `guge`,
+ `guide`,
+ `guitars`,
+ `guru`,
+ `gw`,
+ `gy`,
+ `hair`,
+ `hamburg`,
+ `hangout`,
+ `haus`,
+ `hbo`,
+ `hdfc`,
+ `hdfcbank`,
+ `health`,
+ `healthcare`,
+ `help`,
+ `helsinki`,
+ `here`,
+ `hermes`,
+ `hgtv`,
+ `hiphop`,
+ `hisamitsu`,
+ `hitachi`,
+ `hiv`,
+ `hk`,
+ `hkt`,
+ `hm`,
+ `hn`,
+ `hockey`,
+ `holdings`,
+ `holiday`,
+ `homedepot`,
+ `homegoods`,
+ `homes`,
+ `homesense`,
+ `honda`,
+ `honeywell`,
+ `horse`,
+ `hospital`,
+ `host`,
+ `hosting`,
+ `hot`,
+ `hoteles`,
+ `hotels`,
+ `hotmail`,
+ `house`,
+ `how`,
+ `hr`,
+ `hsbc`,
+ `ht`,
+ `htc`,
+ `hu`,
+ `hughes`,
+ `hyatt`,
+ `hyundai`,
+ `ibm`,
+ `icbc`,
+ `ice`,
+ `icu`,
+ `id`,
+ `ie`,
+ `ieee`,
+ `ifm`,
+ `iinet`,
+ `ikano`,
+ `il`,
+ `im`,
+ `imamat`,
+ `imdb`,
+ `immo`,
+ `immobilien`,
+ `in`,
+ `industries`,
+ `infiniti`,
+ `info`,
+ `ing`,
+ `ink`,
+ `institute`,
+ `insurance`,
+ `insure`,
+ `int`,
+ `intel`,
+ `international`,
+ `intuit`,
+ `investments`,
+ `io`,
+ `ipiranga`,
+ `iq`,
+ `ir`,
+ `irish`,
+ `is`,
+ `iselect`,
+ `ismaili`,
+ `ist`,
+ `istanbul`,
+ `it`,
+ `itau`,
+ `itv`,
+ `iveco`,
+ `iwc`,
+ `jaguar`,
+ `java`,
+ `jcb`,
+ `jcp`,
+ `je`,
+ `jeep`,
+ `jetzt`,
+ `jewelry`,
+ `jio`,
+ `jlc`,
+ `jll`,
+ `jm`,
+ `jmp`,
+ `jnj`,
+ `jo`,
+ `jobs`,
+ `joburg`,
+ `jot`,
+ `joy`,
+ `jp`,
+ `jpmorgan`,
+ `jprs`,
+ `juegos`,
+ `juniper`,
+ `kaufen`,
+ `kddi`,
+ `ke`,
+ `kerryhotels`,
+ `kerrylogistics`,
+ `kerryproperties`,
+ `kfh`,
+ `kg`,
+ `kh`,
+ `ki`,
+ `kia`,
+ `kim`,
+ `kinder`,
+ `kindle`,
+ `kitchen`,
+ `kiwi`,
+ `km`,
+ `kn`,
+ `koeln`,
+ `komatsu`,
+ `kosher`,
+ `kp`,
+ `kpmg`,
+ `kpn`,
+ `kr`,
+ `krd`,
+ `kred`,
+ `kuokgroup`,
+ `kw`,
+ `ky`,
+ `kyoto`,
+ `kz`,
+ `la`,
+ `lacaixa`,
+ `ladbrokes`,
+ `lamborghini`,
+ `lamer`,
+ `lancaster`,
+ `lancia`,
+ `lancome`,
+ `land`,
+ `landrover`,
+ `lanxess`,
+ `lasalle`,
+ `lat`,
+ `latino`,
+ `latrobe`,
+ `law`,
+ `lawyer`,
+ `lb`,
+ `lc`,
+ `lds`,
+ `lease`,
+ `leclerc`,
+ `lefrak`,
+ `legal`,
+ `lego`,
+ `lexus`,
+ `lgbt`,
+ `li`,
+ `liaison`,
+ `lidl`,
+ `life`,
+ `lifeinsurance`,
+ `lifestyle`,
+ `lighting`,
+ `like`,
+ `lilly`,
+ `limited`,
+ `limo`,
+ `lincoln`,
+ `linde`,
+ `link`,
+ `lipsy`,
+ `live`,
+ `living`,
+ `lixil`,
+ `lk`,
+ `loan`,
+ `loans`,
+ `locker`,
+ `locus`,
+ `loft`,
+ `lol`,
+ `london`,
+ `lotte`,
+ `lotto`,
+ `love`,
+ `lpl`,
+ `lplfinancial`,
+ `lr`,
+ `ls`,
+ `lt`,
+ `ltd`,
+ `ltda`,
+ `lu`,
+ `lundbeck`,
+ `lupin`,
+ `luxe`,
+ `luxury`,
+ `lv`,
+ `ly`,
+ `ma`,
+ `macys`,
+ `madrid`,
+ `maif`,
+ `maison`,
+ `makeup`,
+ `man`,
+ `management`,
+ `mango`,
+ `map`,
+ `market`,
+ `marketing`,
+ `markets`,
+ `marriott`,
+ `marshalls`,
+ `maserati`,
+ `mattel`,
+ `mba`,
+ `mc`,
+ `mcd`,
+ `mcdonalds`,
+ `mckinsey`,
+ `md`,
+ `me`,
+ `med`,
+ `media`,
+ `meet`,
+ `melbourne`,
+ `meme`,
+ `memorial`,
+ `men`,
+ `menu`,
+ `meo`,
+ `merckmsd`,
+ `metlife`,
+ `mg`,
+ `mh`,
+ `miami`,
+ `microsoft`,
+ `mil`,
+ `mini`,
+ `mint`,
+ `mit`,
+ `mitsubishi`,
+ `mk`,
+ `ml`,
+ `mlb`,
+ `mls`,
+ `mm`,
+ `mma`,
+ `mn`,
+ `mo`,
+ `mobi`,
+ `mobile`,
+ `mobily`,
+ `moda`,
+ `moe`,
+ `moi`,
+ `mom`,
+ `monash`,
+ `money`,
+ `monster`,
+ `montblanc`,
+ `mopar`,
+ `mormon`,
+ `mortgage`,
+ `moscow`,
+ `moto`,
+ `motorcycles`,
+ `mov`,
+ `movie`,
+ `movistar`,
+ `mp`,
+ `mq`,
+ `mr`,
+ `ms`,
+ `msd`,
+ `mt`,
+ `mtn`,
+ `mtpc`,
+ `mtr`,
+ `mu`,
+ `museum`,
+ `mutual`,
+ `mutuelle`,
+ `mv`,
+ `mw`,
+ `mx`,
+ `my`,
+ `mz`,
+ `na`,
+ `nab`,
+ `nadex`,
+ `nagoya`,
+ `name`,
+ `nationwide`,
+ `natura`,
+ `navy`,
+ `nba`,
+ `nc`,
+ `ne`,
+ `nec`,
+ `net`,
+ `netbank`,
+ `netflix`,
+ `network`,
+ `neustar`,
+ `new`,
+ `newholland`,
+ `news`,
+ `next`,
+ `nextdirect`,
+ `nexus`,
+ `nf`,
+ `nfl`,
+ `ng`,
+ `ngo`,
+ `nhk`,
+ `ni`,
+ `nico`,
+ `nike`,
+ `nikon`,
+ `ninja`,
+ `nissan`,
+ `nissay`,
+ `nl`,
+ `no`,
+ `nokia`,
+ `northwesternmutual`,
+ `norton`,
+ `now`,
+ `nowruz`,
+ `nowtv`,
+ `np`,
+ `nr`,
+ `nra`,
+ `nrw`,
+ `ntt`,
+ `nu`,
+ `nyc`,
+ `nz`,
+ `obi`,
+ `observer`,
+ `off`,
+ `office`,
+ `okinawa`,
+ `olayan`,
+ `olayangroup`,
+ `oldnavy`,
+ `ollo`,
+ `om`,
+ `omega`,
+ `one`,
+ `ong`,
+ `onl`,
+ `online`,
+ `onyourside`,
+ `ooo`,
+ `open`,
+ `oracle`,
+ `orange`,
+ `org`,
+ `organic`,
+ `orientexpress`,
+ `origins`,
+ `osaka`,
+ `otsuka`,
+ `ott`,
+ `ovh`,
+ `pa`,
+ `page`,
+ `pamperedchef`,
+ `panasonic`,
+ `panerai`,
+ `paris`,
+ `pars`,
+ `partners`,
+ `parts`,
+ `party`,
+ `passagens`,
+ `pay`,
+ `pccw`,
+ `pe`,
+ `pet`,
+ `pf`,
+ `pfizer`,
+ `pg`,
+ `ph`,
+ `pharmacy`,
+ `phd`,
+ `philips`,
+ `phone`,
+ `photo`,
+ `photography`,
+ `photos`,
+ `physio`,
+ `piaget`,
+ `pics`,
+ `pictet`,
+ `pictures`,
+ `pid`,
+ `pin`,
+ `ping`,
+ `pink`,
+ `pioneer`,
+ `pizza`,
+ `pk`,
+ `pl`,
+ `place`,
+ `play`,
+ `playstation`,
+ `plumbing`,
+ `plus`,
+ `pm`,
+ `pn`,
+ `pnc`,
+ `pohl`,
+ `poker`,
+ `politie`,
+ `porn`,
+ `post`,
+ `pr`,
+ `pramerica`,
+ `praxi`,
+ `press`,
+ `prime`,
+ `pro`,
+ `prod`,
+ `productions`,
+ `prof`,
+ `progressive`,
+ `promo`,
+ `properties`,
+ `property`,
+ `protection`,
+ `pru`,
+ `prudential`,
+ `ps`,
+ `pt`,
+ `pub`,
+ `pw`,
+ `pwc`,
+ `py`,
+ `qa`,
+ `qpon`,
+ `quebec`,
+ `quest`,
+ `qvc`,
+ `racing`,
+ `radio`,
+ `raid`,
+ `re`,
+ `read`,
+ `realestate`,
+ `realtor`,
+ `realty`,
+ `recipes`,
+ `red`,
+ `redstone`,
+ `redumbrella`,
+ `rehab`,
+ `reise`,
+ `reisen`,
+ `reit`,
+ `reliance`,
+ `ren`,
+ `rent`,
+ `rentals`,
+ `repair`,
+ `report`,
+ `republican`,
+ `rest`,
+ `restaurant`,
+ `review`,
+ `reviews`,
+ `rexroth`,
+ `rich`,
+ `richardli`,
+ `ricoh`,
+ `rightathome`,
+ `ril`,
+ `rio`,
+ `rip`,
+ `rmit`,
+ `ro`,
+ `rocher`,
+ `rocks`,
+ `rodeo`,
+ `rogers`,
+ `room`,
+ `rs`,
+ `rsvp`,
+ `ru`,
+ `ruhr`,
+ `run`,
+ `rw`,
+ `rwe`,
+ `ryukyu`,
+ `sa`,
+ `saarland`,
+ `safe`,
+ `safety`,
+ `sakura`,
+ `sale`,
+ `salon`,
+ `samsclub`,
+ `samsung`,
+ `sandvik`,
+ `sandvikcoromant`,
+ `sanofi`,
+ `sap`,
+ `sapo`,
+ `sarl`,
+ `sas`,
+ `save`,
+ `saxo`,
+ `sb`,
+ `sbi`,
+ `sbs`,
+ `sc`,
+ `sca`,
+ `scb`,
+ `schaeffler`,
+ `schmidt`,
+ `scholarships`,
+ `school`,
+ `schule`,
+ `schwarz`,
+ `science`,
+ `scjohnson`,
+ `scor`,
+ `scot`,
+ `sd`,
+ `se`,
+ `search`,
+ `seat`,
+ `secure`,
+ `security`,
+ `seek`,
+ `select`,
+ `sener`,
+ `services`,
+ `ses`,
+ `seven`,
+ `sew`,
+ `sex`,
+ `sexy`,
+ `sfr`,
+ `sg`,
+ `sh`,
+ `shangrila`,
+ `sharp`,
+ `shaw`,
+ `shell`,
+ `shia`,
+ `shiksha`,
+ `shoes`,
+ `shop`,
+ `shopping`,
+ `shouji`,
+ `show`,
+ `showtime`,
+ `shriram`,
+ `si`,
+ `silk`,
+ `sina`,
+ `singles`,
+ `site`,
+ `sj`,
+ `sk`,
+ `ski`,
+ `skin`,
+ `sky`,
+ `skype`,
+ `sl`,
+ `sling`,
+ `sm`,
+ `smart`,
+ `smile`,
+ `sn`,
+ `sncf`,
+ `so`,
+ `soccer`,
+ `social`,
+ `softbank`,
+ `software`,
+ `sohu`,
+ `solar`,
+ `solutions`,
+ `song`,
+ `sony`,
+ `soy`,
+ `space`,
+ `spiegel`,
+ `spot`,
+ `spreadbetting`,
+ `sr`,
+ `srl`,
+ `srt`,
+ `st`,
+ `stada`,
+ `staples`,
+ `star`,
+ `starhub`,
+ `statebank`,
+ `statefarm`,
+ `statoil`,
+ `stc`,
+ `stcgroup`,
+ `stockholm`,
+ `storage`,
+ `store`,
+ `stream`,
+ `studio`,
+ `study`,
+ `style`,
+ `su`,
+ `sucks`,
+ `supplies`,
+ `supply`,
+ `support`,
+ `surf`,
+ `surgery`,
+ `suzuki`,
+ `sv`,
+ `swatch`,
+ `swiftcover`,
+ `swiss`,
+ `sx`,
+ `sy`,
+ `sydney`,
+ `symantec`,
+ `systems`,
+ `sz`,
+ `tab`,
+ `taipei`,
+ `talk`,
+ `taobao`,
+ `target`,
+ `tatamotors`,
+ `tatar`,
+ `tattoo`,
+ `tax`,
+ `taxi`,
+ `tc`,
+ `tci`,
+ `td`,
+ `tdk`,
+ `team`,
+ `tech`,
+ `technology`,
+ `tel`,
+ `telecity`,
+ `telefonica`,
+ `temasek`,
+ `tennis`,
+ `teva`,
+ `tf`,
+ `tg`,
+ `th`,
+ `thd`,
+ `theater`,
+ `theatre`,
+ `theguardian`,
+ `tiaa`,
+ `tickets`,
+ `tienda`,
+ `tiffany`,
+ `tips`,
+ `tires`,
+ `tirol`,
+ `tj`,
+ `tjmaxx`,
+ `tjx`,
+ `tk`,
+ `tkmaxx`,
+ `tl`,
+ `tm`,
+ `tmall`,
+ `tn`,
+ `to`,
+ `today`,
+ `tokyo`,
+ `tools`,
+ `top`,
+ `toray`,
+ `toshiba`,
+ `total`,
+ `tours`,
+ `town`,
+ `toyota`,
+ `toys`,
+ `tr`,
+ `trade`,
+ `trading`,
+ `training`,
+ `travel`,
+ `travelchannel`,
+ `travelers`,
+ `travelersinsurance`,
+ `trust`,
+ `trv`,
+ `tt`,
+ `tube`,
+ `tui`,
+ `tunes`,
+ `tushu`,
+ `tv`,
+ `tvs`,
+ `tw`,
+ `tz`,
+ `ua`,
+ `ubank`,
+ `ubs`,
+ `uconnect`,
+ `ug`,
+ `uk`,
+ `unicom`,
+ `university`,
+ `uno`,
+ `uol`,
+ `ups`,
+ `us`,
+ `uy`,
+ `uz`,
+ `va`,
+ `vacations`,
+ `vana`,
+ `vanguard`,
+ `vc`,
+ `ve`,
+ `vegas`,
+ `ventures`,
+ `verisign`,
+ `vermögensberater`,
+ `vermögensberatung`,
+ `versicherung`,
+ `vet`,
+ `vg`,
+ `vi`,
+ `viajes`,
+ `video`,
+ `vig`,
+ `viking`,
+ `villas`,
+ `vin`,
+ `vip`,
+ `virgin`,
+ `visa`,
+ `vision`,
+ `vista`,
+ `vistaprint`,
+ `viva`,
+ `vivo`,
+ `vlaanderen`,
+ `vn`,
+ `vodka`,
+ `volkswagen`,
+ `volvo`,
+ `vote`,
+ `voting`,
+ `voto`,
+ `voyage`,
+ `vu`,
+ `vuelos`,
+ `wales`,
+ `walmart`,
+ `walter`,
+ `wang`,
+ `wanggou`,
+ `warman`,
+ `watch`,
+ `watches`,
+ `weather`,
+ `weatherchannel`,
+ `webcam`,
+ `weber`,
+ `website`,
+ `wed`,
+ `wedding`,
+ `weibo`,
+ `weir`,
+ `wf`,
+ `whoswho`,
+ `wien`,
+ `wiki`,
+ `williamhill`,
+ `win`,
+ `windows`,
+ `wine`,
+ `winners`,
+ `wme`,
+ `wolterskluwer`,
+ `woodside`,
+ `work`,
+ `works`,
+ `world`,
+ `wow`,
+ `ws`,
+ `wtc`,
+ `wtf`,
+ `xbox`,
+ `xerox`,
+ `xfinity`,
+ `xihuan`,
+ `xin`,
+ `xperia`,
+ `xxx`,
+ `xyz`,
+ `yachts`,
+ `yahoo`,
+ `yamaxun`,
+ `yandex`,
+ `ye`,
+ `yodobashi`,
+ `yoga`,
+ `yokohama`,
+ `you`,
+ `youtube`,
+ `yt`,
+ `yun`,
+ `za`,
+ `zappos`,
+ `zara`,
+ `zero`,
+ `zip`,
+ `zippo`,
+ `zm`,
+ `zone`,
+ `zuerich`,
+ `zw`,
+ `ελ`,
+ `бел`,
+ `дети`,
+ `ею`,
+ `католик`,
+ `ком`,
+ `мкд`,
+ `мон`,
+ `москва`,
+ `онлайн`,
+ `орг`,
+ `рус`,
+ `рф`,
+ `сайт`,
+ `срб`,
+ `укр`,
+ `қаз`,
+ `հայ`,
+ `קום`,
+ `ابوظبي`,
+ `اتصالات`,
+ `ارامكو`,
+ `الاردن`,
+ `الجزائر`,
+ `السعودية`,
+ `السعوديه`,
+ `السعودیة`,
+ `السعودیۃ`,
+ `العليان`,
+ `المغرب`,
+ `اليمن`,
+ `امارات`,
+ `ايران`,
+ `ایران`,
+ `بازار`,
+ `بيتك`,
+ `بھارت`,
+ `تونس`,
+ `سودان`,
+ `سوريا`,
+ `سورية`,
+ `شبكة`,
+ `عراق`,
+ `عرب`,
+ `عمان`,
+ `فلسطين`,
+ `قطر`,
+ `كاثوليك`,
+ `كوم`,
+ `مصر`,
+ `مليسيا`,
+ `موبايلي`,
+ `موقع`,
+ `همراه`,
+ `پاكستان`,
+ `پاکستان`,
+ `कॉम`,
+ `नेट`,
+ `भारत`,
+ `संगठन`,
+ `বাংলা`,
+ `ভারত`,
+ `ਭਾਰਤ`,
+ `ભારત`,
+ `இந்தியா`,
+ `இலங்கை`,
+ `சிங்கப்பூர்`,
+ `భారత్`,
+ `ලංකා`,
+ `คอม`,
+ `ไทย`,
+ `გე`,
+ `みんな`,
+ `クラウド`,
+ `グーグル`,
+ `コム`,
+ `ストア`,
+ `セール`,
+ `ファッション`,
+ `ポイント`,
+ `一号店`,
+ `世界`,
+ `中信`,
+ `中国`,
+ `中國`,
+ `中文网`,
+ `企业`,
+ `佛山`,
+ `信息`,
+ `健康`,
+ `八卦`,
+ `公司`,
+ `公益`,
+ `台湾`,
+ `台灣`,
+ `商城`,
+ `商店`,
+ `商标`,
+ `嘉里`,
+ `嘉里大酒店`,
+ `在线`,
+ `大众汽车`,
+ `大拿`,
+ `天主教`,
+ `娱乐`,
+ `家電`,
+ `工行`,
+ `广东`,
+ `微博`,
+ `慈善`,
+ `我爱你`,
+ `手机`,
+ `手表`,
+ `政务`,
+ `政府`,
+ `新加坡`,
+ `新闻`,
+ `时尚`,
+ `書籍`,
+ `机构`,
+ `淡马锡`,
+ `游戏`,
+ `澳門`,
+ `澳门`,
+ `点看`,
+ `珠宝`,
+ `移动`,
+ `组织机构`,
+ `网址`,
+ `网店`,
+ `网站`,
+ `网络`,
+ `联通`,
+ `臺灣`,
+ `诺基亚`,
+ `谷歌`,
+ `购物`,
+ `通販`,
+ `集团`,
+ `電訊盈科`,
+ `飞利浦`,
+ `食品`,
+ `餐厅`,
+ `香格里拉`,
+ `香港`,
+ `닷넷`,
+ `닷컴`,
+ `삼성`,
+ `한국`,
+}
diff --git a/vendor/github.com/mvdan/xurls/tlds_pseudo.go b/vendor/github.com/mvdan/xurls/tlds_pseudo.go
new file mode 100644
index 0000000..4183bd5
--- /dev/null
+++ b/vendor/github.com/mvdan/xurls/tlds_pseudo.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2015, Daniel Martí
+// See LICENSE for licensing information
+
+package xurls
+
+// PseudoTLDs is a sorted list of some widely used unofficial TLDs.
+//
+// Sources:
+// * https://en.wikipedia.org/wiki/Pseudo-top-level_domain
+// * https://en.wikipedia.org/wiki/Category:Pseudo-top-level_domains
+// * https://tools.ietf.org/html/draft-grothoff-iesg-special-use-p2p-names-00
+// * https://www.iana.org/assignments/special-use-domain-names/special-use-domain-names.xhtml
+var PseudoTLDs = []string{
+ `bit`, // Namecoin
+ `example`, // Example domain
+ `exit`, // Tor exit node
+ `gnu`, // GNS by public key
+ `i2p`, // I2P network
+ `invalid`, // Invalid domain
+ `local`, // Local network
+ `localhost`, // Local network
+ `onion`, // Tor hidden services
+ `test`, // Test domain
+ `zkey`, // GNS domain name
+}
diff --git a/vendor/github.com/mvdan/xurls/xurls.go b/vendor/github.com/mvdan/xurls/xurls.go
new file mode 100644
index 0000000..ec6894b
--- /dev/null
+++ b/vendor/github.com/mvdan/xurls/xurls.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2015, Daniel Martí
+// See LICENSE for licensing information
+
+// Package xurls extracts urls from plain text using regular expressions.
+package xurls
+
+import "regexp"
+
+//go:generate go run generate/tldsgen/main.go
+//go:generate go run generate/regexgen/main.go
+
+const (
+ letter = `\p{L}`
+ mark = `\p{M}`
+ number = `\p{N}`
+ iriChar = letter + mark + number
+ currency = `\p{Sc}`
+ otherSymb = `\p{So}`
+ endChar = iriChar + `/\-+_&~*%=#` + currency + otherSymb
+ midChar = endChar + `@.,:;'?!|`
+ wellParen = `\([` + midChar + `]*(\([` + midChar + `]*\)[` + midChar + `]*)*\)`
+ wellBrack = `\[[` + midChar + `]*(\[[` + midChar + `]*\][` + midChar + `]*)*\]`
+ wellBrace = `\{[` + midChar + `]*(\{[` + midChar + `]*\}[` + midChar + `]*)*\}`
+ wellAll = wellParen + `|` + wellBrack + `|` + wellBrace
+ pathCont = `([` + midChar + `]*(` + wellAll + `|[` + endChar + `])+)+`
+ comScheme = `[a-zA-Z][a-zA-Z.\-+]*://`
+ scheme = `(` + comScheme + `|` + otherScheme + `)`
+
+ iri = `[` + iriChar + `]([` + iriChar + `\-]*[` + iriChar + `])?`
+ domain = `(` + iri + `\.)+`
+ octet = `(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])`
+ ipv4Addr = `\b` + octet + `\.` + octet + `\.` + octet + `\.` + octet + `\b`
+ ipv6Addr = `([0-9a-fA-F]{1,4}:([0-9a-fA-F]{1,4}:([0-9a-fA-F]{1,4}:([0-9a-fA-F]{1,4}:([0-9a-fA-F]{1,4}:[0-9a-fA-F]{0,4}|:[0-9a-fA-F]{1,4})?|(:[0-9a-fA-F]{1,4}){0,2})|(:[0-9a-fA-F]{1,4}){0,3})|(:[0-9a-fA-F]{1,4}){0,4})|:(:[0-9a-fA-F]{1,4}){0,5})((:[0-9a-fA-F]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9a-fA-F]{1,4}:){1,6}|:):[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){7}:`
+ ipAddr = `(` + ipv4Addr + `|` + ipv6Addr + `)`
+ site = domain + gtld
+ hostName = `(` + site + `|` + ipAddr + `)`
+ port = `(:[0-9]*)?`
+ path = `(/|/` + pathCont + `?|\b|$)`
+ webURL = hostName + port + path
+
+ strict = `(\b` + scheme + pathCont + `)`
+ relaxed = `(` + strict + `|` + webURL + `)`
+)
+
+var (
+ // Relaxed matches all the urls it can find.
+ Relaxed = regexp.MustCompile(relaxed)
+ // Strict only matches urls with a scheme to avoid false positives.
+ Strict = regexp.MustCompile(strict)
+)
+
+func init() {
+ Relaxed.Longest()
+ Strict.Longest()
+}
+
+// StrictMatchingScheme produces a regexp that matches urls like Strict but
+// whose scheme matches the given regular expression.
+func StrictMatchingScheme(exp string) (*regexp.Regexp, error) {
+ strictMatching := `(\b(?i)(` + exp + `)(?-i)` + pathCont + `)`
+ re, err := regexp.Compile(strictMatching)
+ if err != nil {
+ return nil, err
+ }
+ re.Longest()
+ return re, nil
+}
diff --git a/vendor/github.com/nsf/termbox-go/AUTHORS b/vendor/github.com/nsf/termbox-go/AUTHORS
new file mode 100644
index 0000000..fe26fb0
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/AUTHORS
@@ -0,0 +1,4 @@
+# Please keep this file sorted.
+
+Georg Reinke
+nsf
diff --git a/vendor/github.com/nsf/termbox-go/LICENSE b/vendor/github.com/nsf/termbox-go/LICENSE
new file mode 100644
index 0000000..d9bc068
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/LICENSE
@@ -0,0 +1,19 @@
+Copyright (C) 2012 termbox-go authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/nsf/termbox-go/README.md b/vendor/github.com/nsf/termbox-go/README.md
new file mode 100644
index 0000000..e7c57a9
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/README.md
@@ -0,0 +1,30 @@
+## Termbox
+Termbox is a library that provides a minimalistic API which allows the programmer to write text-based user interfaces. The library is cross-platform: it has terminal-based implementations for *nix operating systems and a WinAPI console-based implementation for Windows. The basic idea is an abstraction of the greatest common subset of features available on all major terminals and other terminal-like APIs in a minimalistic fashion. The small API makes it easy to implement, test, maintain and learn, which is what makes termbox a distinct library in its area.
+
+### Installation
+Install and update this Go package with `go get -u github.com/nsf/termbox-go`.
+
+### Examples
+For examples of what can be done, take a look at the demos in the `_demos` directory. You can try them with go run: `go run _demos/keyboard.go`
+
+There are also some interesting projects using termbox-go:
+ - [godit](https://github.com/nsf/godit) is an emacsish lightweight text editor written using termbox.
+ - [gomatrix](https://github.com/GeertJohan/gomatrix) connects to The Matrix and displays its data streams in your terminal.
+ - [gotetris](https://github.com/jjinux/gotetris) is an implementation of Tetris.
+ - [sokoban-go](https://github.com/rn2dy/sokoban-go) is an implementation of sokoban game.
+ - [hecate](https://github.com/evanmiller/hecate) is a hex editor designed by Satan.
+ - [httopd](https://github.com/verdverm/httopd) is top for httpd logs.
+ - [mop](https://github.com/michaeldv/mop) is a stock market tracker for hackers.
+ - [termui](https://github.com/gizak/termui) is a terminal dashboard.
+ - [termloop](https://github.com/JoelOtter/termloop) is a terminal game engine.
+ - [xterm-color-chart](https://github.com/kutuluk/xterm-color-chart) is an XTerm 256 color chart.
+ - [gocui](https://github.com/jroimartin/gocui) is a minimalist Go library aimed at creating console user interfaces.
+ - [dry](https://github.com/moncho/dry) is an interactive cli to manage Docker containers.
+ - [pxl](https://github.com/ichinaski/pxl) displays images in the terminal.
+ - [snake-game](https://github.com/DyegoCosta/snake-game) is an implementation of the Snake game.
+ - [gone](https://github.com/guillaumebreton/gone) is a CLI pomodoro® timer.
+ - [Spoof.go](https://github.com/sabey/spoofgo) offers controllable movement spoofing from the CLI.
+ - [lf](https://github.com/gokcehan/lf) is a terminal file manager.
+
+### API reference
+[godoc.org/github.com/nsf/termbox-go](http://godoc.org/github.com/nsf/termbox-go)
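+
+### Minimal example
+Below is a minimal sketch (not part of the original examples) that initializes
+the terminal, draws a styled string cell by cell, and polls for events until
+Esc is pressed:
+
+```go
+package main
+
+import "github.com/nsf/termbox-go"
+
+func main() {
+	if err := termbox.Init(); err != nil {
+		panic(err)
+	}
+	defer termbox.Close()
+
+	termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
+	// Draw "hello" in bold red on the default background, one cell per rune.
+	for i, ch := range "hello" {
+		termbox.SetCell(i, 0, ch, termbox.ColorRed|termbox.AttrBold, termbox.ColorDefault)
+	}
+	termbox.Flush()
+
+	for {
+		switch ev := termbox.PollEvent(); ev.Type {
+		case termbox.EventKey:
+			if ev.Key == termbox.KeyEsc {
+				return
+			}
+		case termbox.EventResize:
+			// The back buffer resyncs with the new size on Flush.
+			termbox.Flush()
+		}
+	}
+}
+```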
diff --git a/vendor/github.com/nsf/termbox-go/api.go b/vendor/github.com/nsf/termbox-go/api.go
new file mode 100644
index 0000000..b242ddc
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/api.go
@@ -0,0 +1,457 @@
+// +build !windows
+
+package termbox
+
+import "github.com/mattn/go-runewidth"
+import "fmt"
+import "os"
+import "os/signal"
+import "syscall"
+import "runtime"
+
+// public API
+
+// Initializes termbox library. This function should be called before any other functions.
+// After successful initialization, the library must be finalized using 'Close' function.
+//
+// Example usage:
+// err := termbox.Init()
+// if err != nil {
+// panic(err)
+// }
+// defer termbox.Close()
+func Init() error {
+ var err error
+
+ out, err = os.OpenFile("/dev/tty", syscall.O_WRONLY, 0)
+ if err != nil {
+ return err
+ }
+ in, err = syscall.Open("/dev/tty", syscall.O_RDONLY, 0)
+ if err != nil {
+ return err
+ }
+
+ err = setup_term()
+ if err != nil {
+ return fmt.Errorf("termbox: error while reading terminfo data: %v", err)
+ }
+
+ signal.Notify(sigwinch, syscall.SIGWINCH)
+ signal.Notify(sigio, syscall.SIGIO)
+
+ _, err = fcntl(in, syscall.F_SETFL, syscall.O_ASYNC|syscall.O_NONBLOCK)
+ if err != nil {
+ return err
+ }
+ _, err = fcntl(in, syscall.F_SETOWN, syscall.Getpid())
+ if runtime.GOOS != "darwin" && err != nil {
+ return err
+ }
+ err = tcgetattr(out.Fd(), &orig_tios)
+ if err != nil {
+ return err
+ }
+
+ tios := orig_tios
+ tios.Iflag &^= syscall_IGNBRK | syscall_BRKINT | syscall_PARMRK |
+ syscall_ISTRIP | syscall_INLCR | syscall_IGNCR |
+ syscall_ICRNL | syscall_IXON
+ tios.Lflag &^= syscall_ECHO | syscall_ECHONL | syscall_ICANON |
+ syscall_ISIG | syscall_IEXTEN
+ tios.Cflag &^= syscall_CSIZE | syscall_PARENB
+ tios.Cflag |= syscall_CS8
+ tios.Cc[syscall_VMIN] = 1
+ tios.Cc[syscall_VTIME] = 0
+
+ err = tcsetattr(out.Fd(), &tios)
+ if err != nil {
+ return err
+ }
+
+ out.WriteString(funcs[t_enter_ca])
+ out.WriteString(funcs[t_enter_keypad])
+ out.WriteString(funcs[t_hide_cursor])
+ out.WriteString(funcs[t_clear_screen])
+
+ termw, termh = get_term_size(out.Fd())
+ back_buffer.init(termw, termh)
+ front_buffer.init(termw, termh)
+ back_buffer.clear()
+ front_buffer.clear()
+
+ go func() {
+ buf := make([]byte, 128)
+ for {
+ select {
+ case <-sigio:
+ for {
+ n, err := syscall.Read(in, buf)
+ if err == syscall.EAGAIN || err == syscall.EWOULDBLOCK {
+ break
+ }
+ select {
+ case input_comm <- input_event{buf[:n], err}:
+ ie := <-input_comm
+ buf = ie.data[:128]
+ case <-quit:
+ return
+ }
+ }
+ case <-quit:
+ return
+ }
+ }
+ }()
+
+ IsInit = true
+ return nil
+}
+
+// Interrupt an in-progress call to PollEvent by causing it to return
+// EventInterrupt. Note that this function will block until the PollEvent
+// function has successfully been interrupted.
+func Interrupt() {
+ interrupt_comm <- struct{}{}
+}
+
+// Finalizes termbox library, should be called after successful initialization
+// when termbox's functionality isn't required anymore.
+func Close() {
+ quit <- 1
+ out.WriteString(funcs[t_show_cursor])
+ out.WriteString(funcs[t_sgr0])
+ out.WriteString(funcs[t_clear_screen])
+ out.WriteString(funcs[t_exit_ca])
+ out.WriteString(funcs[t_exit_keypad])
+ out.WriteString(funcs[t_exit_mouse])
+ tcsetattr(out.Fd(), &orig_tios)
+
+ out.Close()
+ syscall.Close(in)
+
+ // reset the state, so that on next Init() it will work again
+ termw = 0
+ termh = 0
+ input_mode = InputEsc
+ out = nil
+ in = 0
+ lastfg = attr_invalid
+ lastbg = attr_invalid
+ lastx = coord_invalid
+ lasty = coord_invalid
+ cursor_x = cursor_hidden
+ cursor_y = cursor_hidden
+ foreground = ColorDefault
+ background = ColorDefault
+ IsInit = false
+}
+
+// Synchronizes the internal back buffer with the terminal.
+func Flush() error {
+ // invalidate cursor position
+ lastx = coord_invalid
+ lasty = coord_invalid
+
+ update_size_maybe()
+
+ for y := 0; y < front_buffer.height; y++ {
+ line_offset := y * front_buffer.width
+ for x := 0; x < front_buffer.width; {
+ cell_offset := line_offset + x
+ back := &back_buffer.cells[cell_offset]
+ front := &front_buffer.cells[cell_offset]
+ if back.Ch < ' ' {
+ back.Ch = ' '
+ }
+ w := runewidth.RuneWidth(back.Ch)
+ if w == 0 || w == 2 && runewidth.IsAmbiguousWidth(back.Ch) {
+ w = 1
+ }
+ if *back == *front {
+ x += w
+ continue
+ }
+ *front = *back
+ send_attr(back.Fg, back.Bg)
+
+ if w == 2 && x == front_buffer.width-1 {
+ // there's not enough space for 2-cells rune,
+ // let's just put a space in there
+ send_char(x, y, ' ')
+ } else {
+ send_char(x, y, back.Ch)
+ if w == 2 {
+ next := cell_offset + 1
+ front_buffer.cells[next] = Cell{
+ Ch: 0,
+ Fg: back.Fg,
+ Bg: back.Bg,
+ }
+ }
+ }
+ x += w
+ }
+ }
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ write_cursor(cursor_x, cursor_y)
+ }
+ return flush()
+}
+
+// Sets the position of the cursor. See also HideCursor().
+func SetCursor(x, y int) {
+ if is_cursor_hidden(cursor_x, cursor_y) && !is_cursor_hidden(x, y) {
+ outbuf.WriteString(funcs[t_show_cursor])
+ }
+
+ if !is_cursor_hidden(cursor_x, cursor_y) && is_cursor_hidden(x, y) {
+ outbuf.WriteString(funcs[t_hide_cursor])
+ }
+
+ cursor_x, cursor_y = x, y
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ write_cursor(cursor_x, cursor_y)
+ }
+}
+
+// The shortcut for SetCursor(-1, -1).
+func HideCursor() {
+ SetCursor(cursor_hidden, cursor_hidden)
+}
+
+// Changes cell's parameters in the internal back buffer at the specified
+// position.
+func SetCell(x, y int, ch rune, fg, bg Attribute) {
+ if x < 0 || x >= back_buffer.width {
+ return
+ }
+ if y < 0 || y >= back_buffer.height {
+ return
+ }
+
+ back_buffer.cells[y*back_buffer.width+x] = Cell{ch, fg, bg}
+}
+
+// Returns a slice into the termbox's back buffer. You can get its dimensions
+// using the 'Size' function. The slice remains valid as long as no 'Clear' or
+// 'Flush' function calls were made after the call to this function.
+func CellBuffer() []Cell {
+ return back_buffer.cells
+}
+
+// After getting a raw event from a PollRawEvent function call, you can parse it
+// again into an ordinary one using termbox logic; that is, parse the event as
+// termbox would do it. In addition to the usual Event struct fields, the returned
+// event has its N field set to the number of bytes used within the 'data' slice.
+// If the length of the 'data' slice is zero or the event cannot be parsed for
+// some other reason, the function will return a special event type: EventNone.
+//
+// IMPORTANT: EventNone may contain a non-zero N, which means you should skip
+// these bytes, because termbox cannot recognize them.
+//
+// NOTE: This API is experimental and may change in future.
+func ParseEvent(data []byte) Event {
+ event := Event{Type: EventKey}
+ ok := extract_event(data, &event)
+ if !ok {
+ return Event{Type: EventNone, N: event.N}
+ }
+ return event
+}
+
+// Wait for an event and return it. This is a blocking function call. Instead
+// of EventKey and EventMouse it returns EventRaw events. The raw event is written
+// into the `data` slice and the Event's N field is set to the number of bytes written.
+// The minimum required length of the 'data' slice is 1. This requirement may
+// vary on different platforms.
+//
+// NOTE: This API is experimental and may change in future.
+func PollRawEvent(data []byte) Event {
+ if len(data) == 0 {
+ panic("len(data) >= 1 is a requirement")
+ }
+
+ var event Event
+ if extract_raw_event(data, &event) {
+ return event
+ }
+
+ for {
+ select {
+ case ev := <-input_comm:
+ if ev.err != nil {
+ return Event{Type: EventError, Err: ev.err}
+ }
+
+ inbuf = append(inbuf, ev.data...)
+ input_comm <- ev
+ if extract_raw_event(data, &event) {
+ return event
+ }
+ case <-interrupt_comm:
+ event.Type = EventInterrupt
+ return event
+
+ case <-sigwinch:
+ event.Type = EventResize
+ event.Width, event.Height = get_term_size(out.Fd())
+ return event
+ }
+ }
+}
+
+// Wait for an event and return it. This is a blocking function call.
+func PollEvent() Event {
+ var event Event
+
+ // try to extract event from input buffer, return on success
+ event.Type = EventKey
+ ok := extract_event(inbuf, &event)
+ if event.N != 0 {
+ copy(inbuf, inbuf[event.N:])
+ inbuf = inbuf[:len(inbuf)-event.N]
+ }
+ if ok {
+ return event
+ }
+
+ for {
+ select {
+ case ev := <-input_comm:
+ if ev.err != nil {
+ return Event{Type: EventError, Err: ev.err}
+ }
+
+ inbuf = append(inbuf, ev.data...)
+ input_comm <- ev
+ ok := extract_event(inbuf, &event)
+ if event.N != 0 {
+ copy(inbuf, inbuf[event.N:])
+ inbuf = inbuf[:len(inbuf)-event.N]
+ }
+ if ok {
+ return event
+ }
+ case <-interrupt_comm:
+ event.Type = EventInterrupt
+ return event
+
+ case <-sigwinch:
+ event.Type = EventResize
+ event.Width, event.Height = get_term_size(out.Fd())
+ return event
+ }
+ }
+}
+
+// Returns the size of the internal back buffer (which is mostly the same as
+// the terminal's window size in characters). It doesn't always match the size
+// of the terminal window: after the terminal size has changed, the internal
+// back buffer gets back in sync only after a Clear or Flush function call.
+func Size() (width int, height int) {
+ return termw, termh
+}
+
+// Clears the internal back buffer.
+func Clear(fg, bg Attribute) error {
+ foreground, background = fg, bg
+ err := update_size_maybe()
+ back_buffer.clear()
+ return err
+}
+
+// Sets termbox input mode. Termbox has two input modes:
+//
+// 1. Esc input mode. When ESC sequence is in the buffer and it doesn't match
+// any known sequence. ESC means KeyEsc. This is the default input mode.
+//
+// 2. Alt input mode. When ESC sequence is in the buffer and it doesn't match
+// any known sequence. ESC enables ModAlt modifier for the next keyboard event.
+//
+// Both input modes can be OR'ed with Mouse mode. Setting the Mouse mode bit will
+// enable mouse button press/release and drag events.
+//
+// If 'mode' is InputCurrent, returns the current input mode. See also Input*
+// constants.
+func SetInputMode(mode InputMode) InputMode {
+ if mode == InputCurrent {
+ return input_mode
+ }
+ if mode&(InputEsc|InputAlt) == 0 {
+ mode |= InputEsc
+ }
+ if mode&(InputEsc|InputAlt) == InputEsc|InputAlt {
+ mode &^= InputAlt
+ }
+ if mode&InputMouse != 0 {
+ out.WriteString(funcs[t_enter_mouse])
+ } else {
+ out.WriteString(funcs[t_exit_mouse])
+ }
+
+ input_mode = mode
+ return input_mode
+}
+
+// Sets the termbox output mode. Termbox has four output options:
+//
+// 1. OutputNormal => [1..8]
+// This mode provides 8 different colors:
+// black, red, green, yellow, blue, magenta, cyan, white
+// Shortcut: ColorBlack, ColorRed, ...
+// Attributes: AttrBold, AttrUnderline, AttrReverse
+//
+// Example usage:
+// SetCell(x, y, '@', ColorBlack | AttrBold, ColorRed);
+//
+// 2. Output256 => [1..256]
+// In this mode you can leverage the 256 terminal mode:
+// 0x01 - 0x08: the 8 colors as in OutputNormal
+// 0x09 - 0x10: Color* | AttrBold
+// 0x11 - 0xe8: 216 different colors
+// 0xe9 - 0x1ff: 24 different shades of grey
+//
+// Example usage:
+// SetCell(x, y, '@', 184, 240);
+// SetCell(x, y, '@', 0xb8, 0xf0);
+//
+// 3. Output216 => [1..216]
+// This mode supports the 3rd range of the 256 mode only.
+// But you don't need to provide an offset.
+//
+// 4. OutputGrayscale => [1..26]
+// This mode supports the 4th range of the 256 mode
+// and black and white colors from the 3rd range of the 256 mode.
+// But you don't need to provide an offset.
+//
+// In all modes, 0x00 represents the default color.
+//
+// `go run _demos/output.go` to see its impact on your terminal.
+//
+// If 'mode' is OutputCurrent, it returns the current output mode.
+//
+// Note that this may return a different OutputMode than the one requested,
+// as the requested mode may not be available on the target platform.
+func SetOutputMode(mode OutputMode) OutputMode {
+ if mode == OutputCurrent {
+ return output_mode
+ }
+
+ output_mode = mode
+ return output_mode
+}
+
+// Sync comes in handy when something, such as a third-party process, causes a
+// desync between termbox's understanding of the terminal buffer and reality.
+// Sync forces a complete resync between termbox and the terminal; it may not
+// be visually pretty though.
+func Sync() error {
+ front_buffer.clear()
+ err := send_clear()
+ if err != nil {
+ return err
+ }
+
+ return Flush()
+}
diff --git a/vendor/github.com/nsf/termbox-go/api_common.go b/vendor/github.com/nsf/termbox-go/api_common.go
new file mode 100644
index 0000000..9f23661
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/api_common.go
@@ -0,0 +1,187 @@
+// Package termbox is a library for creating cross-platform text-based interfaces.
+package termbox
+
+// public API, common OS agnostic part
+
+type (
+ InputMode int
+ OutputMode int
+ EventType uint8
+ Modifier uint8
+ Key uint16
+ Attribute uint16
+)
+
+// This type represents a termbox event. The 'Mod', 'Key' and 'Ch' fields are
+// valid if 'Type' is EventKey. The 'Width' and 'Height' fields are valid if
+// 'Type' is EventResize. The 'Err' field is valid if 'Type' is EventError.
+type Event struct {
+ Type EventType // one of Event* constants
+ Mod Modifier // one of Mod* constants or 0
+ Key Key // one of Key* constants, invalid if 'Ch' is not 0
+ Ch rune // a unicode character
+ Width int // width of the screen
+ Height int // height of the screen
+ Err error // error in case if input failed
+ MouseX int // x coord of mouse
+ MouseY int // y coord of mouse
+ N int // number of bytes written when getting a raw event
+}
+
+// A Cell is a single conceptual entity on the screen. The screen is basically a
+// 2d array of cells. 'Ch' is a unicode character, 'Fg' and 'Bg' are foreground
+// and background attributes respectively.
+type Cell struct {
+ Ch rune
+ Fg Attribute
+ Bg Attribute
+}
+
+// IsInit reports whether termbox has been initialized.
+var (
+ IsInit bool = false
+)
+
+// Key constants, see Event.Key field.
+const (
+ KeyF1 Key = 0xFFFF - iota
+ KeyF2
+ KeyF3
+ KeyF4
+ KeyF5
+ KeyF6
+ KeyF7
+ KeyF8
+ KeyF9
+ KeyF10
+ KeyF11
+ KeyF12
+ KeyInsert
+ KeyDelete
+ KeyHome
+ KeyEnd
+ KeyPgup
+ KeyPgdn
+ KeyArrowUp
+ KeyArrowDown
+ KeyArrowLeft
+ KeyArrowRight
+ key_min // see terminfo
+ MouseLeft
+ MouseMiddle
+ MouseRight
+ MouseRelease
+ MouseWheelUp
+ MouseWheelDown
+)
+
+const (
+ KeyCtrlTilde Key = 0x00
+ KeyCtrl2 Key = 0x00
+ KeyCtrlSpace Key = 0x00
+ KeyCtrlA Key = 0x01
+ KeyCtrlB Key = 0x02
+ KeyCtrlC Key = 0x03
+ KeyCtrlD Key = 0x04
+ KeyCtrlE Key = 0x05
+ KeyCtrlF Key = 0x06
+ KeyCtrlG Key = 0x07
+ KeyBackspace Key = 0x08
+ KeyCtrlH Key = 0x08
+ KeyTab Key = 0x09
+ KeyCtrlI Key = 0x09
+ KeyCtrlJ Key = 0x0A
+ KeyCtrlK Key = 0x0B
+ KeyCtrlL Key = 0x0C
+ KeyEnter Key = 0x0D
+ KeyCtrlM Key = 0x0D
+ KeyCtrlN Key = 0x0E
+ KeyCtrlO Key = 0x0F
+ KeyCtrlP Key = 0x10
+ KeyCtrlQ Key = 0x11
+ KeyCtrlR Key = 0x12
+ KeyCtrlS Key = 0x13
+ KeyCtrlT Key = 0x14
+ KeyCtrlU Key = 0x15
+ KeyCtrlV Key = 0x16
+ KeyCtrlW Key = 0x17
+ KeyCtrlX Key = 0x18
+ KeyCtrlY Key = 0x19
+ KeyCtrlZ Key = 0x1A
+ KeyEsc Key = 0x1B
+ KeyCtrlLsqBracket Key = 0x1B
+ KeyCtrl3 Key = 0x1B
+ KeyCtrl4 Key = 0x1C
+ KeyCtrlBackslash Key = 0x1C
+ KeyCtrl5 Key = 0x1D
+ KeyCtrlRsqBracket Key = 0x1D
+ KeyCtrl6 Key = 0x1E
+ KeyCtrl7 Key = 0x1F
+ KeyCtrlSlash Key = 0x1F
+ KeyCtrlUnderscore Key = 0x1F
+ KeySpace Key = 0x20
+ KeyBackspace2 Key = 0x7F
+ KeyCtrl8 Key = 0x7F
+)
+
+// Alt modifier constant, see Event.Mod field and SetInputMode function.
+const (
+ ModAlt Modifier = 1 << iota
+ ModMotion
+)
+
+// Cell colors. You can combine a color with multiple attributes using bitwise
+// OR ('|').
+const (
+ ColorDefault Attribute = iota
+ ColorBlack
+ ColorRed
+ ColorGreen
+ ColorYellow
+ ColorBlue
+ ColorMagenta
+ ColorCyan
+ ColorWhite
+)
+
+// Cell attributes. It is possible to use multiple attributes by combining them
+// using bitwise OR ('|'). Colors, however, cannot be combined with each other,
+// but you can combine attributes with a single color.
+//
+// It's worth mentioning that some platforms don't support certain attributes.
+// For example, the Windows console doesn't support AttrUnderline, and on some
+// terminals applying AttrBold to the background may result in blinking text.
+// Use them with caution and test your code on various terminals.
+const (
+ AttrBold Attribute = 1 << (iota + 9)
+ AttrUnderline
+ AttrReverse
+)
+
+// Input mode. See SetInputMode function.
+const (
+ InputEsc InputMode = 1 << iota
+ InputAlt
+ InputMouse
+ InputCurrent InputMode = 0
+)
+
+// Output mode. See SetOutputMode function.
+const (
+ OutputCurrent OutputMode = iota
+ OutputNormal
+ Output256
+ Output216
+ OutputGrayscale
+)
+
+// Event type. See Event.Type field.
+const (
+ EventKey EventType = iota
+ EventResize
+ EventMouse
+ EventError
+ EventInterrupt
+ EventRaw
+ EventNone
+)
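Everything above is the whole OS-agnostic public surface of the vendored package; the rest of this change is platform plumbing behind it. As a hedged, minimal sketch of how a consumer typically drives that surface (every identifier used here appears in the files in this change; the program itself is purely illustrative):

package main

import termbox "github.com/nsf/termbox-go"

func main() {
    if err := termbox.Init(); err != nil {
        panic(err)
    }
    defer termbox.Close()

    // draw a short message into the back buffer, then sync it to the terminal
    for i, ch := range "hello" {
        termbox.SetCell(i, 0, ch, termbox.ColorWhite, termbox.ColorDefault)
    }
    termbox.Flush()

    // block on events until Esc is pressed
    for {
        if ev := termbox.PollEvent(); ev.Type == termbox.EventKey && ev.Key == termbox.KeyEsc {
            return
        }
    }
}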
diff --git a/vendor/github.com/nsf/termbox-go/api_windows.go b/vendor/github.com/nsf/termbox-go/api_windows.go
new file mode 100644
index 0000000..7def30a
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/api_windows.go
@@ -0,0 +1,239 @@
+package termbox
+
+import (
+ "syscall"
+)
+
+// public API
+
+// Initializes termbox library. This function should be called before any other functions.
+// After successful initialization, the library must be finalized using 'Close' function.
+//
+// Example usage:
+// err := termbox.Init()
+// if err != nil {
+// panic(err)
+// }
+// defer termbox.Close()
+func Init() error {
+ var err error
+
+ interrupt, err = create_event()
+ if err != nil {
+ return err
+ }
+
+ in, err = syscall.Open("CONIN$", syscall.O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+ out, err = syscall.Open("CONOUT$", syscall.O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+
+ err = get_console_mode(in, &orig_mode)
+ if err != nil {
+ return err
+ }
+
+ err = set_console_mode(in, enable_window_input)
+ if err != nil {
+ return err
+ }
+
+ orig_size = get_term_size(out)
+ win_size := get_win_size(out)
+
+ err = set_console_screen_buffer_size(out, win_size)
+ if err != nil {
+ return err
+ }
+
+ err = get_console_cursor_info(out, &orig_cursor_info)
+ if err != nil {
+ return err
+ }
+
+ show_cursor(false)
+ term_size = get_term_size(out)
+ back_buffer.init(int(term_size.x), int(term_size.y))
+ front_buffer.init(int(term_size.x), int(term_size.y))
+ back_buffer.clear()
+ front_buffer.clear()
+ clear()
+
+ diffbuf = make([]diff_msg, 0, 32)
+
+ go input_event_producer()
+ IsInit = true
+ return nil
+}
+
+// Finalizes termbox library, should be called after successful initialization
+// when termbox's functionality isn't required anymore.
+func Close() {
+ // we ignore errors here, because we can't really do anything about them
+ Clear(0, 0)
+ Flush()
+
+ // stop event producer
+ cancel_comm <- true
+ set_event(interrupt)
+ select {
+ case <-input_comm:
+ default:
+ }
+ <-cancel_done_comm
+
+ set_console_cursor_info(out, &orig_cursor_info)
+ set_console_cursor_position(out, coord{})
+ set_console_screen_buffer_size(out, orig_size)
+ set_console_mode(in, orig_mode)
+ syscall.Close(in)
+ syscall.Close(out)
+ syscall.Close(interrupt)
+ IsInit = false
+}
+
+// Interrupt an in-progress call to PollEvent by causing it to return
+// EventInterrupt. Note that this function will block until the PollEvent
+// function has successfully been interrupted.
+func Interrupt() {
+ interrupt_comm <- struct{}{}
+}
+
+// Synchronizes the internal back buffer with the terminal.
+func Flush() error {
+ update_size_maybe()
+ prepare_diff_messages()
+ for _, diff := range diffbuf {
+ r := small_rect{
+ left: 0,
+ top: diff.pos,
+ right: term_size.x - 1,
+ bottom: diff.pos + diff.lines - 1,
+ }
+ write_console_output(out, diff.chars, r)
+ }
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ move_cursor(cursor_x, cursor_y)
+ }
+ return nil
+}
+
+// Sets the position of the cursor. See also HideCursor().
+func SetCursor(x, y int) {
+ if is_cursor_hidden(cursor_x, cursor_y) && !is_cursor_hidden(x, y) {
+ show_cursor(true)
+ }
+
+ if !is_cursor_hidden(cursor_x, cursor_y) && is_cursor_hidden(x, y) {
+ show_cursor(false)
+ }
+
+ cursor_x, cursor_y = x, y
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ move_cursor(cursor_x, cursor_y)
+ }
+}
+
+// The shortcut for SetCursor(-1, -1).
+func HideCursor() {
+ SetCursor(cursor_hidden, cursor_hidden)
+}
+
+// Changes cell's parameters in the internal back buffer at the specified
+// position.
+func SetCell(x, y int, ch rune, fg, bg Attribute) {
+ if x < 0 || x >= back_buffer.width {
+ return
+ }
+ if y < 0 || y >= back_buffer.height {
+ return
+ }
+
+ back_buffer.cells[y*back_buffer.width+x] = Cell{ch, fg, bg}
+}
+
+// Returns a slice into the termbox's back buffer. You can get its dimensions
+// using 'Size' function. The slice remains valid as long as no 'Clear' or
+// 'Flush' function calls were made after call to this function.
+func CellBuffer() []Cell {
+ return back_buffer.cells
+}
+
+// Wait for an event and return it. This is a blocking function call.
+func PollEvent() Event {
+ select {
+ case ev := <-input_comm:
+ return ev
+ case <-interrupt_comm:
+ return Event{Type: EventInterrupt}
+ }
+}
+
+// Returns the size of the internal back buffer (which is mostly the same as
+// the console's window size in characters). It doesn't always match the size
+// of the console window: after the console size has changed, the internal back
+// buffer gets back in sync only after a Clear or Flush call.
+func Size() (int, int) {
+ return int(term_size.x), int(term_size.y)
+}
+
+// Clears the internal back buffer.
+func Clear(fg, bg Attribute) error {
+ foreground, background = fg, bg
+ update_size_maybe()
+ back_buffer.clear()
+ return nil
+}
+
+// Sets termbox input mode. Termbox has two input modes:
+//
+// 1. Esc input mode. When an ESC sequence is in the buffer and it doesn't
+// match any known sequence, ESC means KeyEsc. This is the default input mode.
+//
+// 2. Alt input mode. When an ESC sequence is in the buffer and it doesn't match
+// any known sequence, ESC enables the ModAlt modifier for the next keyboard event.
+//
+// Both input modes can be OR'ed with Mouse mode. Setting the Mouse mode bit
+// enables mouse button press/release and drag events.
+//
+// If 'mode' is InputCurrent, returns the current input mode. See also Input*
+// constants.
+func SetInputMode(mode InputMode) InputMode {
+ if mode == InputCurrent {
+ return input_mode
+ }
+ if mode&InputMouse != 0 {
+ err := set_console_mode(in, enable_window_input|enable_mouse_input|enable_extended_flags)
+ if err != nil {
+ panic(err)
+ }
+ } else {
+ err := set_console_mode(in, enable_window_input)
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ input_mode = mode
+ return input_mode
+}
+
+// Sets the termbox output mode.
+//
+// Windows console does not support extra colour modes,
+// so this will always set and return OutputNormal.
+func SetOutputMode(mode OutputMode) OutputMode {
+ return OutputNormal
+}
+
+// Sync comes in handy when something, such as a third party process, causes a
+// desync between termbox's understanding of the terminal buffer and reality.
+// Sync forces a complete resync between termbox and the terminal; it may not
+// be visually pretty, though. At the moment, on Windows it does nothing.
+func Sync() error {
+ return nil
+}
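SetInputMode above is also where mouse reporting is switched on for the Windows console (enable_mouse_input / enable_extended_flags). A hedged sketch of a caller opting into mouse events; the glyph and the exit key are arbitrary choices here, everything else is the public API from api_common.go:

package main

import termbox "github.com/nsf/termbox-go"

func main() {
    if err := termbox.Init(); err != nil {
        panic(err)
    }
    defer termbox.Close()

    // Esc-style input plus the mouse bit; on Windows this sets
    // enable_window_input|enable_mouse_input|enable_extended_flags
    termbox.SetInputMode(termbox.InputEsc | termbox.InputMouse)

    for {
        switch ev := termbox.PollEvent(); ev.Type {
        case termbox.EventMouse:
            // MouseX/MouseY are zero-based cell coordinates
            termbox.SetCell(ev.MouseX, ev.MouseY, '*', termbox.ColorRed, termbox.ColorDefault)
            termbox.Flush()
        case termbox.EventKey:
            if ev.Key == termbox.KeyEsc {
                return
            }
        }
    }
}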
diff --git a/vendor/github.com/nsf/termbox-go/collect_terminfo.py b/vendor/github.com/nsf/termbox-go/collect_terminfo.py
new file mode 100755
index 0000000..5e50975
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/collect_terminfo.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+import sys, os, subprocess
+
+def escaped(s):
+ return repr(s)[1:-1]
+
+def tput(term, name):
+ try:
+ return subprocess.check_output(['tput', '-T%s' % term, name]).decode()
+ except subprocess.CalledProcessError as e:
+ return e.output.decode()
+
+
+def w(s):
+ if s is None:
+ return
+ sys.stdout.write(s)
+
+terminals = {
+ 'xterm' : 'xterm',
+ 'rxvt-256color' : 'rxvt_256color',
+ 'rxvt-unicode' : 'rxvt_unicode',
+ 'linux' : 'linux',
+ 'Eterm' : 'eterm',
+ 'screen' : 'screen'
+}
+
+keys = [
+ "F1", "kf1",
+ "F2", "kf2",
+ "F3", "kf3",
+ "F4", "kf4",
+ "F5", "kf5",
+ "F6", "kf6",
+ "F7", "kf7",
+ "F8", "kf8",
+ "F9", "kf9",
+ "F10", "kf10",
+ "F11", "kf11",
+ "F12", "kf12",
+ "INSERT", "kich1",
+ "DELETE", "kdch1",
+ "HOME", "khome",
+ "END", "kend",
+ "PGUP", "kpp",
+ "PGDN", "knp",
+ "KEY_UP", "kcuu1",
+ "KEY_DOWN", "kcud1",
+ "KEY_LEFT", "kcub1",
+ "KEY_RIGHT", "kcuf1"
+]
+
+funcs = [
+ "T_ENTER_CA", "smcup",
+ "T_EXIT_CA", "rmcup",
+ "T_SHOW_CURSOR", "cnorm",
+ "T_HIDE_CURSOR", "civis",
+ "T_CLEAR_SCREEN", "clear",
+ "T_SGR0", "sgr0",
+ "T_UNDERLINE", "smul",
+ "T_BOLD", "bold",
+ "T_BLINK", "blink",
+ "T_REVERSE", "rev",
+ "T_ENTER_KEYPAD", "smkx",
+ "T_EXIT_KEYPAD", "rmkx"
+]
+
+def iter_pairs(iterable):
+ # pair up consecutive items; stops cleanly when the iterable is exhausted
+ it = iter(iterable)
+ return zip(it, it)
+
+def do_term(term, nick):
+ w("// %s\n" % term)
+ w("var %s_keys = []string{\n\t" % nick)
+ for k, v in iter_pairs(keys):
+ w('"')
+ w(escaped(tput(term, v)))
+ w('",')
+ w("\n}\n")
+ w("var %s_funcs = []string{\n\t" % nick)
+ for k,v in iter_pairs(funcs):
+ w('"')
+ if v == "sgr":
+ w("\\033[3%d;4%dm")
+ elif v == "cup":
+ w("\\033[%d;%dH")
+ else:
+ w(escaped(tput(term, v)))
+ w('", ')
+ w("\n}\n\n")
+
+def do_terms(d):
+ w("var terms = []struct {\n")
+ w("\tname string\n")
+ w("\tkeys []string\n")
+ w("\tfuncs []string\n")
+ w("}{\n")
+ for k, v in d.items():
+ w('\t{"%s", %s_keys, %s_funcs},\n' % (k, v, v))
+ w("}\n\n")
+
+w("// +build !windows\n\npackage termbox\n\n")
+
+for k,v in terminals.items():
+ do_term(k, v)
+
+do_terms(terminals)
+
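collect_terminfo.py shells out to tput for a fixed set of terminals and prints Go source to stdout; the committed result of redirecting that output into a file is, presumably, terminfo_builtin.go further down in this change. Abbreviated, the generated tables have this shape (values copied from the xterm entry below):

// xterm
var xterm_keys = []string{
    "\x1bOP", "\x1bOQ", "\x1bOR", // ... one escape sequence per special key ...
}
var xterm_funcs = []string{
    "\x1b[?1049h", "\x1b[?1049l", // ... one sequence per terminal function ...
}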
diff --git a/vendor/github.com/nsf/termbox-go/syscalls.go b/vendor/github.com/nsf/termbox-go/syscalls.go
new file mode 100644
index 0000000..4f52bb9
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls.go
@@ -0,0 +1,39 @@
+// +build ignore
+
+package termbox
+
+/*
+#include <termios.h>
+#include <sys/ioctl.h>
+*/
+import "C"
+
+type syscall_Termios C.struct_termios
+
+const (
+ syscall_IGNBRK = C.IGNBRK
+ syscall_BRKINT = C.BRKINT
+ syscall_PARMRK = C.PARMRK
+ syscall_ISTRIP = C.ISTRIP
+ syscall_INLCR = C.INLCR
+ syscall_IGNCR = C.IGNCR
+ syscall_ICRNL = C.ICRNL
+ syscall_IXON = C.IXON
+ syscall_OPOST = C.OPOST
+ syscall_ECHO = C.ECHO
+ syscall_ECHONL = C.ECHONL
+ syscall_ICANON = C.ICANON
+ syscall_ISIG = C.ISIG
+ syscall_IEXTEN = C.IEXTEN
+ syscall_CSIZE = C.CSIZE
+ syscall_PARENB = C.PARENB
+ syscall_CS8 = C.CS8
+ syscall_VMIN = C.VMIN
+ syscall_VTIME = C.VTIME
+
+ // on darwin change these to (on *bsd too?):
+ // C.TIOCGETA
+ // C.TIOCSETA
+ syscall_TCGETS = C.TCGETS
+ syscall_TCSETS = C.TCSETS
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_darwin.go b/vendor/github.com/nsf/termbox-go/syscalls_darwin.go
new file mode 100644
index 0000000..25b78f7
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_darwin.go
@@ -0,0 +1,41 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+// +build !amd64
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_darwin_amd64.go b/vendor/github.com/nsf/termbox-go/syscalls_darwin_amd64.go
new file mode 100644
index 0000000..11f25be
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_darwin_amd64.go
@@ -0,0 +1,40 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint64
+ Oflag uint64
+ Cflag uint64
+ Lflag uint64
+ Cc [20]uint8
+ Pad_cgo_0 [4]byte
+ Ispeed uint64
+ Ospeed uint64
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x40487413
+ syscall_TCSETS = 0x80487414
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_dragonfly.go b/vendor/github.com/nsf/termbox-go/syscalls_dragonfly.go
new file mode 100644
index 0000000..e03624e
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_dragonfly.go
@@ -0,0 +1,39 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_freebsd.go b/vendor/github.com/nsf/termbox-go/syscalls_freebsd.go
new file mode 100644
index 0000000..e03624e
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_freebsd.go
@@ -0,0 +1,39 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_linux.go b/vendor/github.com/nsf/termbox-go/syscalls_linux.go
new file mode 100644
index 0000000..b88960d
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_linux.go
@@ -0,0 +1,33 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+import "syscall"
+
+type syscall_Termios syscall.Termios
+
+const (
+ syscall_IGNBRK = syscall.IGNBRK
+ syscall_BRKINT = syscall.BRKINT
+ syscall_PARMRK = syscall.PARMRK
+ syscall_ISTRIP = syscall.ISTRIP
+ syscall_INLCR = syscall.INLCR
+ syscall_IGNCR = syscall.IGNCR
+ syscall_ICRNL = syscall.ICRNL
+ syscall_IXON = syscall.IXON
+ syscall_OPOST = syscall.OPOST
+ syscall_ECHO = syscall.ECHO
+ syscall_ECHONL = syscall.ECHONL
+ syscall_ICANON = syscall.ICANON
+ syscall_ISIG = syscall.ISIG
+ syscall_IEXTEN = syscall.IEXTEN
+ syscall_CSIZE = syscall.CSIZE
+ syscall_PARENB = syscall.PARENB
+ syscall_CS8 = syscall.CS8
+ syscall_VMIN = syscall.VMIN
+ syscall_VTIME = syscall.VTIME
+
+ syscall_TCGETS = syscall.TCGETS
+ syscall_TCSETS = syscall.TCSETS
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_netbsd.go b/vendor/github.com/nsf/termbox-go/syscalls_netbsd.go
new file mode 100644
index 0000000..49a3355
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_netbsd.go
@@ -0,0 +1,39 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed int32
+ Ospeed int32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_openbsd.go b/vendor/github.com/nsf/termbox-go/syscalls_openbsd.go
new file mode 100644
index 0000000..49a3355
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_openbsd.go
@@ -0,0 +1,39 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed int32
+ Ospeed int32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_windows.go b/vendor/github.com/nsf/termbox-go/syscalls_windows.go
new file mode 100644
index 0000000..472d002
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_windows.go
@@ -0,0 +1,61 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs -- -DUNICODE syscalls.go
+
+package termbox
+
+const (
+ foreground_blue = 0x1
+ foreground_green = 0x2
+ foreground_red = 0x4
+ foreground_intensity = 0x8
+ background_blue = 0x10
+ background_green = 0x20
+ background_red = 0x40
+ background_intensity = 0x80
+ std_input_handle = -0xa
+ std_output_handle = -0xb
+ key_event = 0x1
+ mouse_event = 0x2
+ window_buffer_size_event = 0x4
+ enable_window_input = 0x8
+ enable_mouse_input = 0x10
+ enable_extended_flags = 0x80
+
+ vk_f1 = 0x70
+ vk_f2 = 0x71
+ vk_f3 = 0x72
+ vk_f4 = 0x73
+ vk_f5 = 0x74
+ vk_f6 = 0x75
+ vk_f7 = 0x76
+ vk_f8 = 0x77
+ vk_f9 = 0x78
+ vk_f10 = 0x79
+ vk_f11 = 0x7a
+ vk_f12 = 0x7b
+ vk_insert = 0x2d
+ vk_delete = 0x2e
+ vk_home = 0x24
+ vk_end = 0x23
+ vk_pgup = 0x21
+ vk_pgdn = 0x22
+ vk_arrow_up = 0x26
+ vk_arrow_down = 0x28
+ vk_arrow_left = 0x25
+ vk_arrow_right = 0x27
+ vk_backspace = 0x8
+ vk_tab = 0x9
+ vk_enter = 0xd
+ vk_esc = 0x1b
+ vk_space = 0x20
+
+ left_alt_pressed = 0x2
+ left_ctrl_pressed = 0x8
+ right_alt_pressed = 0x1
+ right_ctrl_pressed = 0x4
+ shift_pressed = 0x10
+
+ generic_read = 0x80000000
+ generic_write = 0x40000000
+ console_textmode_buffer = 0x1
+)
diff --git a/vendor/github.com/nsf/termbox-go/termbox.go b/vendor/github.com/nsf/termbox-go/termbox.go
new file mode 100644
index 0000000..c2d86c6
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/termbox.go
@@ -0,0 +1,511 @@
+// +build !windows
+
+package termbox
+
+import "unicode/utf8"
+import "bytes"
+import "syscall"
+import "unsafe"
+import "strings"
+import "strconv"
+import "os"
+import "io"
+
+// private API
+
+const (
+ t_enter_ca = iota
+ t_exit_ca
+ t_show_cursor
+ t_hide_cursor
+ t_clear_screen
+ t_sgr0
+ t_underline
+ t_bold
+ t_blink
+ t_reverse
+ t_enter_keypad
+ t_exit_keypad
+ t_enter_mouse
+ t_exit_mouse
+ t_max_funcs
+)
+
+const (
+ coord_invalid = -2
+ attr_invalid = Attribute(0xFFFF)
+)
+
+type input_event struct {
+ data []byte
+ err error
+}
+
+var (
+ // term specific sequences
+ keys []string
+ funcs []string
+
+ // termbox inner state
+ orig_tios syscall_Termios
+ back_buffer cellbuf
+ front_buffer cellbuf
+ termw int
+ termh int
+ input_mode = InputEsc
+ output_mode = OutputNormal
+ out *os.File
+ in int
+ lastfg = attr_invalid
+ lastbg = attr_invalid
+ lastx = coord_invalid
+ lasty = coord_invalid
+ cursor_x = cursor_hidden
+ cursor_y = cursor_hidden
+ foreground = ColorDefault
+ background = ColorDefault
+ inbuf = make([]byte, 0, 64)
+ outbuf bytes.Buffer
+ sigwinch = make(chan os.Signal, 1)
+ sigio = make(chan os.Signal, 1)
+ quit = make(chan int)
+ input_comm = make(chan input_event)
+ interrupt_comm = make(chan struct{})
+ intbuf = make([]byte, 0, 16)
+
+ // grayscale indexes
+ grayscale = []Attribute{
+ 0, 17, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 232,
+ }
+)
+
+func write_cursor(x, y int) {
+ outbuf.WriteString("\033[")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(y+1), 10))
+ outbuf.WriteString(";")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(x+1), 10))
+ outbuf.WriteString("H")
+}
+
+func write_sgr_fg(a Attribute) {
+ switch output_mode {
+ case Output256, Output216, OutputGrayscale:
+ outbuf.WriteString("\033[38;5;")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
+ outbuf.WriteString("m")
+ default:
+ outbuf.WriteString("\033[3")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
+ outbuf.WriteString("m")
+ }
+}
+
+func write_sgr_bg(a Attribute) {
+ switch output_mode {
+ case Output256, Output216, OutputGrayscale:
+ outbuf.WriteString("\033[48;5;")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
+ outbuf.WriteString("m")
+ default:
+ outbuf.WriteString("\033[4")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
+ outbuf.WriteString("m")
+ }
+}
+
+func write_sgr(fg, bg Attribute) {
+ switch output_mode {
+ case Output256, Output216, OutputGrayscale:
+ outbuf.WriteString("\033[38;5;")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(fg-1), 10))
+ outbuf.WriteString("m")
+ outbuf.WriteString("\033[48;5;")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(bg-1), 10))
+ outbuf.WriteString("m")
+ default:
+ outbuf.WriteString("\033[3")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(fg-1), 10))
+ outbuf.WriteString(";4")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(bg-1), 10))
+ outbuf.WriteString("m")
+ }
+}
+
+type winsize struct {
+ rows uint16
+ cols uint16
+ xpixels uint16
+ ypixels uint16
+}
+
+func get_term_size(fd uintptr) (int, int) {
+ var sz winsize
+ _, _, _ = syscall.Syscall(syscall.SYS_IOCTL,
+ fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&sz)))
+ return int(sz.cols), int(sz.rows)
+}
+
+func send_attr(fg, bg Attribute) {
+ if fg == lastfg && bg == lastbg {
+ return
+ }
+
+ outbuf.WriteString(funcs[t_sgr0])
+
+ var fgcol, bgcol Attribute
+
+ switch output_mode {
+ case Output256:
+ fgcol = fg & 0x1FF
+ bgcol = bg & 0x1FF
+ case Output216:
+ fgcol = fg & 0xFF
+ bgcol = bg & 0xFF
+ if fgcol > 216 {
+ fgcol = ColorDefault
+ }
+ if bgcol > 216 {
+ bgcol = ColorDefault
+ }
+ if fgcol != ColorDefault {
+ fgcol += 0x10
+ }
+ if bgcol != ColorDefault {
+ bgcol += 0x10
+ }
+ case OutputGrayscale:
+ fgcol = fg & 0x1F
+ bgcol = bg & 0x1F
+ if fgcol > 26 {
+ fgcol = ColorDefault
+ }
+ if bgcol > 26 {
+ bgcol = ColorDefault
+ }
+ if fgcol != ColorDefault {
+ fgcol = grayscale[fgcol]
+ }
+ if bgcol != ColorDefault {
+ bgcol = grayscale[bgcol]
+ }
+ default:
+ fgcol = fg & 0x0F
+ bgcol = bg & 0x0F
+ }
+
+ if fgcol != ColorDefault {
+ if bgcol != ColorDefault {
+ write_sgr(fgcol, bgcol)
+ } else {
+ write_sgr_fg(fgcol)
+ }
+ } else if bgcol != ColorDefault {
+ write_sgr_bg(bgcol)
+ }
+
+ if fg&AttrBold != 0 {
+ outbuf.WriteString(funcs[t_bold])
+ }
+ if bg&AttrBold != 0 {
+ outbuf.WriteString(funcs[t_blink])
+ }
+ if fg&AttrUnderline != 0 {
+ outbuf.WriteString(funcs[t_underline])
+ }
+ if fg&AttrReverse|bg&AttrReverse != 0 {
+ outbuf.WriteString(funcs[t_reverse])
+ }
+
+ lastfg, lastbg = fg, bg
+}
+
+func send_char(x, y int, ch rune) {
+ var buf [8]byte
+ n := utf8.EncodeRune(buf[:], ch)
+ if x-1 != lastx || y != lasty {
+ write_cursor(x, y)
+ }
+ lastx, lasty = x, y
+ outbuf.Write(buf[:n])
+}
+
+func flush() error {
+ _, err := io.Copy(out, &outbuf)
+ outbuf.Reset()
+ return err
+}
+
+func send_clear() error {
+ send_attr(foreground, background)
+ outbuf.WriteString(funcs[t_clear_screen])
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ write_cursor(cursor_x, cursor_y)
+ }
+
+ // we need to invalidate the cursor position too; these two vars are
+ // used only as a simple cursor-positioning optimization. The cursor
+ // may actually be in the correct place, but simply discarding the
+ // optimization once gives us a simple solution for the case when the
+ // cursor has moved
+ lastx = coord_invalid
+ lasty = coord_invalid
+
+ return flush()
+}
+
+func update_size_maybe() error {
+ w, h := get_term_size(out.Fd())
+ if w != termw || h != termh {
+ termw, termh = w, h
+ back_buffer.resize(termw, termh)
+ front_buffer.resize(termw, termh)
+ front_buffer.clear()
+ return send_clear()
+ }
+ return nil
+}
+
+func tcsetattr(fd uintptr, termios *syscall_Termios) error {
+ r, _, e := syscall.Syscall(syscall.SYS_IOCTL,
+ fd, uintptr(syscall_TCSETS), uintptr(unsafe.Pointer(termios)))
+ if r != 0 {
+ return os.NewSyscallError("SYS_IOCTL", e)
+ }
+ return nil
+}
+
+func tcgetattr(fd uintptr, termios *syscall_Termios) error {
+ r, _, e := syscall.Syscall(syscall.SYS_IOCTL,
+ fd, uintptr(syscall_TCGETS), uintptr(unsafe.Pointer(termios)))
+ if r != 0 {
+ return os.NewSyscallError("SYS_IOCTL", e)
+ }
+ return nil
+}
+
+func parse_mouse_event(event *Event, buf string) (int, bool) {
+ if strings.HasPrefix(buf, "\033[M") && len(buf) >= 6 {
+ // X10 mouse encoding, the simplest one
+ // \033 [ M Cb Cx Cy
+ b := buf[3] - 32
+ switch b & 3 {
+ case 0:
+ if b&64 != 0 {
+ event.Key = MouseWheelUp
+ } else {
+ event.Key = MouseLeft
+ }
+ case 1:
+ if b&64 != 0 {
+ event.Key = MouseWheelDown
+ } else {
+ event.Key = MouseMiddle
+ }
+ case 2:
+ event.Key = MouseRight
+ case 3:
+ event.Key = MouseRelease
+ default:
+ return 6, false
+ }
+ event.Type = EventMouse // KeyEvent by default
+ if b&32 != 0 {
+ event.Mod |= ModMotion
+ }
+
+ // the coord is 1,1 for upper left
+ event.MouseX = int(buf[4]) - 1 - 32
+ event.MouseY = int(buf[5]) - 1 - 32
+ return 6, true
+ } else if strings.HasPrefix(buf, "\033[<") || strings.HasPrefix(buf, "\033[") {
+ // xterm 1006 extended mode or urxvt 1015 extended mode
+ // xterm: \033 [ < Cb ; Cx ; Cy (M or m)
+ // urxvt: \033 [ Cb ; Cx ; Cy M
+
+ // find the first M or m, that's where we stop
+ mi := strings.IndexAny(buf, "Mm")
+ if mi == -1 {
+ return 0, false
+ }
+
+ // whether it's a capital M or not
+ isM := buf[mi] == 'M'
+
+ // whether it's urxvt or not
+ isU := false
+
+ // buf[2] is safe here, because having M or m found means we have at
+ // least 3 bytes in a string
+ if buf[2] == '<' {
+ buf = buf[3:mi]
+ } else {
+ isU = true
+ buf = buf[2:mi]
+ }
+
+ s1 := strings.Index(buf, ";")
+ s2 := strings.LastIndex(buf, ";")
+ // not found or only one ';'
+ if s1 == -1 || s2 == -1 || s1 == s2 {
+ return 0, false
+ }
+
+ n1, err := strconv.ParseInt(buf[0:s1], 10, 64)
+ if err != nil {
+ return 0, false
+ }
+ n2, err := strconv.ParseInt(buf[s1+1:s2], 10, 64)
+ if err != nil {
+ return 0, false
+ }
+ n3, err := strconv.ParseInt(buf[s2+1:], 10, 64)
+ if err != nil {
+ return 0, false
+ }
+
+ // on urxvt, the first number is encoded exactly as in X10, but we need
+ // to make it zero-based; on xterm it is zero-based already
+ if isU {
+ n1 -= 32
+ }
+ switch n1 & 3 {
+ case 0:
+ if n1&64 != 0 {
+ event.Key = MouseWheelUp
+ } else {
+ event.Key = MouseLeft
+ }
+ case 1:
+ if n1&64 != 0 {
+ event.Key = MouseWheelDown
+ } else {
+ event.Key = MouseMiddle
+ }
+ case 2:
+ event.Key = MouseRight
+ case 3:
+ event.Key = MouseRelease
+ default:
+ return mi + 1, false
+ }
+ if !isM {
+ // on xterm mouse release is signaled by lowercase m
+ event.Key = MouseRelease
+ }
+
+ event.Type = EventMouse // KeyEvent by default
+ if n1&32 != 0 {
+ event.Mod |= ModMotion
+ }
+
+ event.MouseX = int(n2) - 1
+ event.MouseY = int(n3) - 1
+ return mi + 1, true
+ }
+
+ return 0, false
+}
+
+func parse_escape_sequence(event *Event, buf []byte) (int, bool) {
+ bufstr := string(buf)
+ for i, key := range keys {
+ if strings.HasPrefix(bufstr, key) {
+ event.Ch = 0
+ event.Key = Key(0xFFFF - i)
+ return len(key), true
+ }
+ }
+
+ // if none of the keys match, let's try mouse sequences
+ return parse_mouse_event(event, bufstr)
+}
+
+func extract_raw_event(data []byte, event *Event) bool {
+ if len(inbuf) == 0 {
+ return false
+ }
+
+ n := len(data)
+ if n == 0 {
+ return false
+ }
+
+ n = copy(data, inbuf)
+ copy(inbuf, inbuf[n:])
+ inbuf = inbuf[:len(inbuf)-n]
+
+ event.N = n
+ event.Type = EventRaw
+ return true
+}
+
+func extract_event(inbuf []byte, event *Event) bool {
+ if len(inbuf) == 0 {
+ event.N = 0
+ return false
+ }
+
+ if inbuf[0] == '\033' {
+ // possible escape sequence
+ if n, ok := parse_escape_sequence(event, inbuf); n != 0 {
+ event.N = n
+ return ok
+ }
+
+ // it's not an escape sequence, so it's either Alt or Esc; check input_mode
+ switch {
+ case input_mode&InputEsc != 0:
+ // if we're in escape mode, fill Esc event, pop buffer, return success
+ event.Ch = 0
+ event.Key = KeyEsc
+ event.Mod = 0
+ event.N = 1
+ return true
+ case input_mode&InputAlt != 0:
+ // if we're in alt mode, set Alt modifier to event and redo parsing
+ event.Mod = ModAlt
+ ok := extract_event(inbuf[1:], event)
+ if ok {
+ event.N++
+ } else {
+ event.N = 0
+ }
+ return ok
+ default:
+ panic("unreachable")
+ }
+ }
+
+ // if we're here, this is not an escape sequence and not an alt sequence
+ // so, it's a FUNCTIONAL KEY or a UNICODE character
+
+ // first of all check if it's a functional key
+ if Key(inbuf[0]) <= KeySpace || Key(inbuf[0]) == KeyBackspace2 {
+ // fill event, pop buffer, return success
+ event.Ch = 0
+ event.Key = Key(inbuf[0])
+ event.N = 1
+ return true
+ }
+
+ // the only possible option is utf8 rune
+ if r, n := utf8.DecodeRune(inbuf); r != utf8.RuneError {
+ event.Ch = r
+ event.Key = 0
+ event.N = n
+ return true
+ }
+
+ return false
+}
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+ r, _, e := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), uintptr(cmd),
+ uintptr(arg))
+ val = int(r)
+ if e != 0 {
+ err = e
+ }
+ return
+}
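The write_sgr* helpers above pin down how the extended output modes are encoded: in Output256, Output216 and OutputGrayscale the attribute value minus one is emitted as an ANSI 256-color index ("\033[38;5;<attr-1>m"), with 0 reserved for ColorDefault. A hedged sketch of what that means for a caller; the program is illustrative only and simply paints the first sixteen palette entries:

package main

import termbox "github.com/nsf/termbox-go"

func main() {
    if err := termbox.Init(); err != nil {
        panic(err)
    }
    defer termbox.Close()

    // switch into 256-color mode; on Windows SetOutputMode is a no-op that
    // reports OutputNormal instead
    termbox.SetOutputMode(termbox.Output256)

    // Attribute(n+1) selects ANSI palette entry n; Attribute(0) keeps the
    // terminal's default color
    for i := 0; i < 16; i++ {
        termbox.SetCell(i, 0, ' ', termbox.ColorDefault, termbox.Attribute(i+1))
    }
    termbox.Flush()
    termbox.PollEvent() // wait for any event before restoring the terminal
}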
diff --git a/vendor/github.com/nsf/termbox-go/termbox_common.go b/vendor/github.com/nsf/termbox-go/termbox_common.go
new file mode 100644
index 0000000..c3355cc
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/termbox_common.go
@@ -0,0 +1,59 @@
+package termbox
+
+// private API, common OS agnostic part
+
+type cellbuf struct {
+ width int
+ height int
+ cells []Cell
+}
+
+func (this *cellbuf) init(width, height int) {
+ this.width = width
+ this.height = height
+ this.cells = make([]Cell, width*height)
+}
+
+func (this *cellbuf) resize(width, height int) {
+ if this.width == width && this.height == height {
+ return
+ }
+
+ oldw := this.width
+ oldh := this.height
+ oldcells := this.cells
+
+ this.init(width, height)
+ this.clear()
+
+ minw, minh := oldw, oldh
+
+ if width < minw {
+ minw = width
+ }
+ if height < minh {
+ minh = height
+ }
+
+ for i := 0; i < minh; i++ {
+ srco, dsto := i*oldw, i*width
+ src := oldcells[srco : srco+minw]
+ dst := this.cells[dsto : dsto+minw]
+ copy(dst, src)
+ }
+}
+
+func (this *cellbuf) clear() {
+ for i := range this.cells {
+ c := &this.cells[i]
+ c.Ch = ' '
+ c.Fg = foreground
+ c.Bg = background
+ }
+}
+
+const cursor_hidden = -1
+
+func is_cursor_hidden(x, y int) bool {
+ return x == cursor_hidden || y == cursor_hidden
+}
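cellbuf.resize above preserves the overlapping region of the old contents when the terminal changes size, copying row by row out of the flat, row-major cell slice. A standalone, hedged illustration of that same copy; resizeGrid is a made-up name and nothing below is part of the vendored package:

package main

import "fmt"

// resizeGrid copies the overlapping region of a w*h grid, stored row-major in
// a flat slice, into a new nw*nh grid, the same scheme cellbuf.resize uses.
func resizeGrid(cells []rune, w, h, nw, nh int) []rune {
    out := make([]rune, nw*nh)
    minw, minh := w, h
    if nw < minw {
        minw = nw
    }
    if nh < minh {
        minh = nh
    }
    for y := 0; y < minh; y++ {
        copy(out[y*nw:y*nw+minw], cells[y*w:y*w+minw])
    }
    return out
}

func main() {
    old := []rune("abcdef") // a 3x2 grid: "abc" over "def"
    fmt.Println(string(resizeGrid(old, 3, 2, 2, 2))) // prints "abde"
}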
diff --git a/vendor/github.com/nsf/termbox-go/termbox_windows.go b/vendor/github.com/nsf/termbox-go/termbox_windows.go
new file mode 100644
index 0000000..f7dad7b
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/termbox_windows.go
@@ -0,0 +1,856 @@
+package termbox
+
+import "syscall"
+import "unsafe"
+import "unicode/utf16"
+import "github.com/mattn/go-runewidth"
+
+type (
+ wchar uint16
+ short int16
+ dword uint32
+ word uint16
+ char_info struct {
+ char wchar
+ attr word
+ }
+ coord struct {
+ x short
+ y short
+ }
+ small_rect struct {
+ left short
+ top short
+ right short
+ bottom short
+ }
+ console_screen_buffer_info struct {
+ size coord
+ cursor_position coord
+ attributes word
+ window small_rect
+ maximum_window_size coord
+ }
+ console_cursor_info struct {
+ size dword
+ visible int32
+ }
+ input_record struct {
+ event_type word
+ _ [2]byte
+ event [16]byte
+ }
+ key_event_record struct {
+ key_down int32
+ repeat_count word
+ virtual_key_code word
+ virtual_scan_code word
+ unicode_char wchar
+ control_key_state dword
+ }
+ window_buffer_size_record struct {
+ size coord
+ }
+ mouse_event_record struct {
+ mouse_pos coord
+ button_state dword
+ control_key_state dword
+ event_flags dword
+ }
+)
+
+const (
+ mouse_lmb = 0x1
+ mouse_rmb = 0x2
+ mouse_mmb = 0x4 | 0x8 | 0x10
+)
+
+func (this coord) uintptr() uintptr {
+ return uintptr(*(*int32)(unsafe.Pointer(&this)))
+}
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+var is_cjk = runewidth.IsEastAsian()
+
+var (
+ proc_set_console_active_screen_buffer = kernel32.NewProc("SetConsoleActiveScreenBuffer")
+ proc_set_console_screen_buffer_size = kernel32.NewProc("SetConsoleScreenBufferSize")
+ proc_create_console_screen_buffer = kernel32.NewProc("CreateConsoleScreenBuffer")
+ proc_get_console_screen_buffer_info = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ proc_write_console_output = kernel32.NewProc("WriteConsoleOutputW")
+ proc_write_console_output_character = kernel32.NewProc("WriteConsoleOutputCharacterW")
+ proc_write_console_output_attribute = kernel32.NewProc("WriteConsoleOutputAttribute")
+ proc_set_console_cursor_info = kernel32.NewProc("SetConsoleCursorInfo")
+ proc_set_console_cursor_position = kernel32.NewProc("SetConsoleCursorPosition")
+ proc_get_console_cursor_info = kernel32.NewProc("GetConsoleCursorInfo")
+ proc_read_console_input = kernel32.NewProc("ReadConsoleInputW")
+ proc_get_console_mode = kernel32.NewProc("GetConsoleMode")
+ proc_set_console_mode = kernel32.NewProc("SetConsoleMode")
+ proc_fill_console_output_character = kernel32.NewProc("FillConsoleOutputCharacterW")
+ proc_fill_console_output_attribute = kernel32.NewProc("FillConsoleOutputAttribute")
+ proc_create_event = kernel32.NewProc("CreateEventW")
+ proc_wait_for_multiple_objects = kernel32.NewProc("WaitForMultipleObjects")
+ proc_set_event = kernel32.NewProc("SetEvent")
+)
+
+func set_console_active_screen_buffer(h syscall.Handle) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_active_screen_buffer.Addr(),
+ 1, uintptr(h), 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_console_screen_buffer_size(h syscall.Handle, size coord) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_screen_buffer_size.Addr(),
+ 2, uintptr(h), size.uintptr(), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func create_console_screen_buffer() (h syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall6(proc_create_console_screen_buffer.Addr(),
+ 5, uintptr(generic_read|generic_write), 0, 0, console_textmode_buffer, 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return syscall.Handle(r0), err
+}
+
+func get_console_screen_buffer_info(h syscall.Handle, info *console_screen_buffer_info) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_get_console_screen_buffer_info.Addr(),
+ 2, uintptr(h), uintptr(unsafe.Pointer(info)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func write_console_output(h syscall.Handle, chars []char_info, dst small_rect) (err error) {
+ tmp_coord = coord{dst.right - dst.left + 1, dst.bottom - dst.top + 1}
+ tmp_rect = dst
+ r0, _, e1 := syscall.Syscall6(proc_write_console_output.Addr(),
+ 5, uintptr(h), uintptr(unsafe.Pointer(&chars[0])), tmp_coord.uintptr(),
+ tmp_coord0.uintptr(), uintptr(unsafe.Pointer(&tmp_rect)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func write_console_output_character(h syscall.Handle, chars []wchar, pos coord) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_write_console_output_character.Addr(),
+ 5, uintptr(h), uintptr(unsafe.Pointer(&chars[0])), uintptr(len(chars)),
+ pos.uintptr(), uintptr(unsafe.Pointer(&tmp_arg)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func write_console_output_attribute(h syscall.Handle, attrs []word, pos coord) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_write_console_output_attribute.Addr(),
+ 5, uintptr(h), uintptr(unsafe.Pointer(&attrs[0])), uintptr(len(attrs)),
+ pos.uintptr(), uintptr(unsafe.Pointer(&tmp_arg)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_console_cursor_info(h syscall.Handle, info *console_cursor_info) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_cursor_info.Addr(),
+ 2, uintptr(h), uintptr(unsafe.Pointer(info)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func get_console_cursor_info(h syscall.Handle, info *console_cursor_info) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_get_console_cursor_info.Addr(),
+ 2, uintptr(h), uintptr(unsafe.Pointer(info)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_console_cursor_position(h syscall.Handle, pos coord) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_cursor_position.Addr(),
+ 2, uintptr(h), pos.uintptr(), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func read_console_input(h syscall.Handle, record *input_record) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_read_console_input.Addr(),
+ 4, uintptr(h), uintptr(unsafe.Pointer(record)), 1, uintptr(unsafe.Pointer(&tmp_arg)), 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func get_console_mode(h syscall.Handle, mode *dword) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_get_console_mode.Addr(),
+ 2, uintptr(h), uintptr(unsafe.Pointer(mode)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_console_mode(h syscall.Handle, mode dword) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_mode.Addr(),
+ 2, uintptr(h), uintptr(mode), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func fill_console_output_character(h syscall.Handle, char wchar, n int) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_fill_console_output_character.Addr(),
+ 5, uintptr(h), uintptr(char), uintptr(n), tmp_coord.uintptr(),
+ uintptr(unsafe.Pointer(&tmp_arg)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func fill_console_output_attribute(h syscall.Handle, attr word, n int) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_fill_console_output_attribute.Addr(),
+ 5, uintptr(h), uintptr(attr), uintptr(n), tmp_coord.uintptr(),
+ uintptr(unsafe.Pointer(&tmp_arg)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func create_event() (out syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall6(proc_create_event.Addr(),
+ 4, 0, 0, 0, 0, 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return syscall.Handle(r0), err
+}
+
+func wait_for_multiple_objects(objects []syscall.Handle) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_wait_for_multiple_objects.Addr(),
+ 4, uintptr(len(objects)), uintptr(unsafe.Pointer(&objects[0])),
+ 0, 0xFFFFFFFF, 0, 0)
+ if uint32(r0) == 0xFFFFFFFF {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_event(ev syscall.Handle) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_event.Addr(),
+ 1, uintptr(ev), 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+type diff_msg struct {
+ pos short
+ lines short
+ chars []char_info
+}
+
+type input_event struct {
+ event Event
+ err error
+}
+
+var (
+ orig_cursor_info console_cursor_info
+ orig_size coord
+ orig_mode dword
+ orig_screen syscall.Handle
+ back_buffer cellbuf
+ front_buffer cellbuf
+ term_size coord
+ input_mode = InputEsc
+ cursor_x = cursor_hidden
+ cursor_y = cursor_hidden
+ foreground = ColorDefault
+ background = ColorDefault
+ in syscall.Handle
+ out syscall.Handle
+ interrupt syscall.Handle
+ charbuf []char_info
+ diffbuf []diff_msg
+ beg_x = -1
+ beg_y = -1
+ beg_i = -1
+ input_comm = make(chan Event)
+ interrupt_comm = make(chan struct{})
+ cancel_comm = make(chan bool, 1)
+ cancel_done_comm = make(chan bool)
+ alt_mode_esc = false
+
+ // these ones just to prevent heap allocs at all costs
+ tmp_info console_screen_buffer_info
+ tmp_arg dword
+ tmp_coord0 = coord{0, 0}
+ tmp_coord = coord{0, 0}
+ tmp_rect = small_rect{0, 0, 0, 0}
+)
+
+func get_cursor_position(out syscall.Handle) coord {
+ err := get_console_screen_buffer_info(out, &tmp_info)
+ if err != nil {
+ panic(err)
+ }
+ return tmp_info.cursor_position
+}
+
+func get_term_size(out syscall.Handle) coord {
+ err := get_console_screen_buffer_info(out, &tmp_info)
+ if err != nil {
+ panic(err)
+ }
+ return tmp_info.size
+}
+
+func get_win_size(out syscall.Handle) coord {
+ err := get_console_screen_buffer_info(out, &tmp_info)
+ if err != nil {
+ panic(err)
+ }
+ return coord{
+ x: tmp_info.window.right - tmp_info.window.left + 1,
+ y: tmp_info.window.bottom - tmp_info.window.top + 1,
+ }
+}
+
+func update_size_maybe() {
+ size := get_term_size(out)
+ if size.x != term_size.x || size.y != term_size.y {
+ term_size = size
+ back_buffer.resize(int(size.x), int(size.y))
+ front_buffer.resize(int(size.x), int(size.y))
+ front_buffer.clear()
+ clear()
+
+ area := int(size.x) * int(size.y)
+ if cap(charbuf) < area {
+ charbuf = make([]char_info, 0, area)
+ }
+ }
+}
+
+var color_table_bg = []word{
+ 0, // default (black)
+ 0, // black
+ background_red,
+ background_green,
+ background_red | background_green, // yellow
+ background_blue,
+ background_red | background_blue, // magenta
+ background_green | background_blue, // cyan
+ background_red | background_blue | background_green, // white
+}
+
+var color_table_fg = []word{
+ foreground_red | foreground_blue | foreground_green, // default (white)
+ 0,
+ foreground_red,
+ foreground_green,
+ foreground_red | foreground_green, // yellow
+ foreground_blue,
+ foreground_red | foreground_blue, // magenta
+ foreground_green | foreground_blue, // cyan
+ foreground_red | foreground_blue | foreground_green, // white
+}
+
+const (
+ replacement_char = '\uFFFD'
+ max_rune = '\U0010FFFF'
+ surr1 = 0xd800
+ surr2 = 0xdc00
+ surr3 = 0xe000
+ surr_self = 0x10000
+)
+
+func append_diff_line(y int) int {
+ n := 0
+ for x := 0; x < front_buffer.width; {
+ cell_offset := y*front_buffer.width + x
+ back := &back_buffer.cells[cell_offset]
+ front := &front_buffer.cells[cell_offset]
+ attr, char := cell_to_char_info(*back)
+ charbuf = append(charbuf, char_info{attr: attr, char: char[0]})
+ *front = *back
+ n++
+ w := runewidth.RuneWidth(back.Ch)
+ if w == 0 || w == 2 && runewidth.IsAmbiguousWidth(back.Ch) {
+ w = 1
+ }
+ x += w
+ // If not CJK, fill trailing space with whitespace
+ if !is_cjk && w == 2 {
+ charbuf = append(charbuf, char_info{attr: attr, char: ' '})
+ }
+ }
+ return n
+}
+
+// compares 'back_buffer' with 'front_buffer' and prepares all changes in the form of
+// 'diff_msg's in the 'diff_buf'
+func prepare_diff_messages() {
+ // clear buffers
+ diffbuf = diffbuf[:0]
+ charbuf = charbuf[:0]
+
+ var diff diff_msg
+ gbeg := 0
+ for y := 0; y < front_buffer.height; y++ {
+ same := true
+ line_offset := y * front_buffer.width
+ for x := 0; x < front_buffer.width; x++ {
+ cell_offset := line_offset + x
+ back := &back_buffer.cells[cell_offset]
+ front := &front_buffer.cells[cell_offset]
+ if *back != *front {
+ same = false
+ break
+ }
+ }
+ if same && diff.lines > 0 {
+ diffbuf = append(diffbuf, diff)
+ diff = diff_msg{}
+ }
+ if !same {
+ beg := len(charbuf)
+ end := beg + append_diff_line(y)
+ if diff.lines == 0 {
+ diff.pos = short(y)
+ gbeg = beg
+ }
+ diff.lines++
+ diff.chars = charbuf[gbeg:end]
+ }
+ }
+ if diff.lines > 0 {
+ diffbuf = append(diffbuf, diff)
+ diff = diff_msg{}
+ }
+}
+
+func get_ct(table []word, idx int) word {
+ idx = idx & 0x0F
+ if idx >= len(table) {
+ idx = len(table) - 1
+ }
+ return table[idx]
+}
+
+func cell_to_char_info(c Cell) (attr word, wc [2]wchar) {
+ attr = get_ct(color_table_fg, int(c.Fg)) | get_ct(color_table_bg, int(c.Bg))
+ if c.Fg&AttrReverse|c.Bg&AttrReverse != 0 {
+ attr = (attr&0xF0)>>4 | (attr&0x0F)<<4
+ }
+ if c.Fg&AttrBold != 0 {
+ attr |= foreground_intensity
+ }
+ if c.Bg&AttrBold != 0 {
+ attr |= background_intensity
+ }
+
+ r0, r1 := utf16.EncodeRune(c.Ch)
+ if r0 == 0xFFFD {
+ wc[0] = wchar(c.Ch)
+ wc[1] = ' '
+ } else {
+ wc[0] = wchar(r0)
+ wc[1] = wchar(r1)
+ }
+ return
+}
+
+func move_cursor(x, y int) {
+ err := set_console_cursor_position(out, coord{short(x), short(y)})
+ if err != nil {
+ panic(err)
+ }
+}
+
+func show_cursor(visible bool) {
+ var v int32
+ if visible {
+ v = 1
+ }
+
+ var info console_cursor_info
+ info.size = 100
+ info.visible = v
+ err := set_console_cursor_info(out, &info)
+ if err != nil {
+ panic(err)
+ }
+}
+
+func clear() {
+ var err error
+ attr, char := cell_to_char_info(Cell{
+ ' ',
+ foreground,
+ background,
+ })
+
+ area := int(term_size.x) * int(term_size.y)
+ err = fill_console_output_attribute(out, attr, area)
+ if err != nil {
+ panic(err)
+ }
+ err = fill_console_output_character(out, char[0], area)
+ if err != nil {
+ panic(err)
+ }
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ move_cursor(cursor_x, cursor_y)
+ }
+}
+
+func key_event_record_to_event(r *key_event_record) (Event, bool) {
+ if r.key_down == 0 {
+ return Event{}, false
+ }
+
+ e := Event{Type: EventKey}
+ if input_mode&InputAlt != 0 {
+ if alt_mode_esc {
+ e.Mod = ModAlt
+ alt_mode_esc = false
+ }
+ if r.control_key_state&(left_alt_pressed|right_alt_pressed) != 0 {
+ e.Mod = ModAlt
+ }
+ }
+
+ ctrlpressed := r.control_key_state&(left_ctrl_pressed|right_ctrl_pressed) != 0
+
+ if r.virtual_key_code >= vk_f1 && r.virtual_key_code <= vk_f12 {
+ switch r.virtual_key_code {
+ case vk_f1:
+ e.Key = KeyF1
+ case vk_f2:
+ e.Key = KeyF2
+ case vk_f3:
+ e.Key = KeyF3
+ case vk_f4:
+ e.Key = KeyF4
+ case vk_f5:
+ e.Key = KeyF5
+ case vk_f6:
+ e.Key = KeyF6
+ case vk_f7:
+ e.Key = KeyF7
+ case vk_f8:
+ e.Key = KeyF8
+ case vk_f9:
+ e.Key = KeyF9
+ case vk_f10:
+ e.Key = KeyF10
+ case vk_f11:
+ e.Key = KeyF11
+ case vk_f12:
+ e.Key = KeyF12
+ default:
+ panic("unreachable")
+ }
+
+ return e, true
+ }
+
+ if r.virtual_key_code <= vk_delete {
+ switch r.virtual_key_code {
+ case vk_insert:
+ e.Key = KeyInsert
+ case vk_delete:
+ e.Key = KeyDelete
+ case vk_home:
+ e.Key = KeyHome
+ case vk_end:
+ e.Key = KeyEnd
+ case vk_pgup:
+ e.Key = KeyPgup
+ case vk_pgdn:
+ e.Key = KeyPgdn
+ case vk_arrow_up:
+ e.Key = KeyArrowUp
+ case vk_arrow_down:
+ e.Key = KeyArrowDown
+ case vk_arrow_left:
+ e.Key = KeyArrowLeft
+ case vk_arrow_right:
+ e.Key = KeyArrowRight
+ case vk_backspace:
+ if ctrlpressed {
+ e.Key = KeyBackspace2
+ } else {
+ e.Key = KeyBackspace
+ }
+ case vk_tab:
+ e.Key = KeyTab
+ case vk_enter:
+ e.Key = KeyEnter
+ case vk_esc:
+ switch {
+ case input_mode&InputEsc != 0:
+ e.Key = KeyEsc
+ case input_mode&InputAlt != 0:
+ alt_mode_esc = true
+ return Event{}, false
+ }
+ case vk_space:
+ if ctrlpressed {
+ // manual return here, because KeyCtrlSpace is zero
+ e.Key = KeyCtrlSpace
+ return e, true
+ } else {
+ e.Key = KeySpace
+ }
+ }
+
+ if e.Key != 0 {
+ return e, true
+ }
+ }
+
+ if ctrlpressed {
+ if Key(r.unicode_char) >= KeyCtrlA && Key(r.unicode_char) <= KeyCtrlRsqBracket {
+ e.Key = Key(r.unicode_char)
+ if input_mode&InputAlt != 0 && e.Key == KeyEsc {
+ alt_mode_esc = true
+ return Event{}, false
+ }
+ return e, true
+ }
+ switch r.virtual_key_code {
+ case 192, 50:
+ // manual return here, because KeyCtrl2 is zero
+ e.Key = KeyCtrl2
+ return e, true
+ case 51:
+ if input_mode&InputAlt != 0 {
+ alt_mode_esc = true
+ return Event{}, false
+ }
+ e.Key = KeyCtrl3
+ case 52:
+ e.Key = KeyCtrl4
+ case 53:
+ e.Key = KeyCtrl5
+ case 54:
+ e.Key = KeyCtrl6
+ case 189, 191, 55:
+ e.Key = KeyCtrl7
+ case 8, 56:
+ e.Key = KeyCtrl8
+ }
+
+ if e.Key != 0 {
+ return e, true
+ }
+ }
+
+ if r.unicode_char != 0 {
+ e.Ch = rune(r.unicode_char)
+ return e, true
+ }
+
+ return Event{}, false
+}
+
+func input_event_producer() {
+ var r input_record
+ var err error
+ var last_button Key
+ var last_button_pressed Key
+ var last_state = dword(0)
+ var last_x, last_y = -1, -1
+ handles := []syscall.Handle{in, interrupt}
+ for {
+ err = wait_for_multiple_objects(handles)
+ if err != nil {
+ input_comm <- Event{Type: EventError, Err: err}
+ }
+
+ select {
+ case <-cancel_comm:
+ cancel_done_comm <- true
+ return
+ default:
+ }
+
+ err = read_console_input(in, &r)
+ if err != nil {
+ input_comm <- Event{Type: EventError, Err: err}
+ }
+
+ switch r.event_type {
+ case key_event:
+ kr := (*key_event_record)(unsafe.Pointer(&r.event))
+ ev, ok := key_event_record_to_event(kr)
+ if ok {
+ for i := 0; i < int(kr.repeat_count); i++ {
+ input_comm <- ev
+ }
+ }
+ case window_buffer_size_event:
+ sr := *(*window_buffer_size_record)(unsafe.Pointer(&r.event))
+ input_comm <- Event{
+ Type: EventResize,
+ Width: int(sr.size.x),
+ Height: int(sr.size.y),
+ }
+ case mouse_event:
+ mr := *(*mouse_event_record)(unsafe.Pointer(&r.event))
+ ev := Event{Type: EventMouse}
+ switch mr.event_flags {
+ case 0, 2:
+ // single or double click
+ cur_state := mr.button_state
+ switch {
+ case last_state&mouse_lmb == 0 && cur_state&mouse_lmb != 0:
+ last_button = MouseLeft
+ last_button_pressed = last_button
+ case last_state&mouse_rmb == 0 && cur_state&mouse_rmb != 0:
+ last_button = MouseRight
+ last_button_pressed = last_button
+ case last_state&mouse_mmb == 0 && cur_state&mouse_mmb != 0:
+ last_button = MouseMiddle
+ last_button_pressed = last_button
+ case last_state&mouse_lmb != 0 && cur_state&mouse_lmb == 0:
+ last_button = MouseRelease
+ case last_state&mouse_rmb != 0 && cur_state&mouse_rmb == 0:
+ last_button = MouseRelease
+ case last_state&mouse_mmb != 0 && cur_state&mouse_mmb == 0:
+ last_button = MouseRelease
+ default:
+ last_state = cur_state
+ continue
+ }
+ last_state = cur_state
+ ev.Key = last_button
+ last_x, last_y = int(mr.mouse_pos.x), int(mr.mouse_pos.y)
+ ev.MouseX = last_x
+ ev.MouseY = last_y
+ case 1:
+ // mouse motion
+ x, y := int(mr.mouse_pos.x), int(mr.mouse_pos.y)
+ if last_state != 0 && (last_x != x || last_y != y) {
+ ev.Key = last_button_pressed
+ ev.Mod = ModMotion
+ ev.MouseX = x
+ ev.MouseY = y
+ last_x, last_y = x, y
+ } else {
+ ev.Type = EventNone
+ }
+ case 4:
+ // mouse wheel
+ n := int16(mr.button_state >> 16)
+ if n > 0 {
+ ev.Key = MouseWheelUp
+ } else {
+ ev.Key = MouseWheelDown
+ }
+ last_x, last_y = int(mr.mouse_pos.x), int(mr.mouse_pos.y)
+ ev.MouseX = last_x
+ ev.MouseY = last_y
+ default:
+ ev.Type = EventNone
+ }
+ if ev.Type != EventNone {
+ input_comm <- ev
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/nsf/termbox-go/terminfo.go b/vendor/github.com/nsf/termbox-go/terminfo.go
new file mode 100644
index 0000000..35dbd70
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/terminfo.go
@@ -0,0 +1,221 @@
+// +build !windows
+// This file contains a simple and incomplete implementation of the terminfo
+// database. Information was taken from the ncurses manpages term(5) and
+// terminfo(5). Currently, only the string capabilities for special keys and for
+// functions without parameters are actually used. Colors are still done with
+// ANSI escape sequences. Other special features that are not (yet?) supported
+// are reading from ~/.terminfo, the TERMINFO_DIRS variable, Berkeley database
+// format and extended capabilities.
+
+package termbox
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+const (
+ ti_magic = 0432
+ ti_header_length = 12
+ ti_mouse_enter = "\x1b[?1000h\x1b[?1002h\x1b[?1015h\x1b[?1006h"
+ ti_mouse_leave = "\x1b[?1006l\x1b[?1015l\x1b[?1002l\x1b[?1000l"
+)
+
+func load_terminfo() ([]byte, error) {
+ var data []byte
+ var err error
+
+ term := os.Getenv("TERM")
+ if term == "" {
+ return nil, fmt.Errorf("termbox: TERM not set")
+ }
+
+ // The following behaviour follows the one described in terminfo(5) as
+ // distributed by ncurses.
+
+ terminfo := os.Getenv("TERMINFO")
+ if terminfo != "" {
+ // if TERMINFO is set, no other directory should be searched
+ return ti_try_path(terminfo)
+ }
+
+ // next, consider ~/.terminfo
+ home := os.Getenv("HOME")
+ if home != "" {
+ data, err = ti_try_path(home + "/.terminfo")
+ if err == nil {
+ return data, nil
+ }
+ }
+
+ // next, TERMINFO_DIRS
+ dirs := os.Getenv("TERMINFO_DIRS")
+ if dirs != "" {
+ for _, dir := range strings.Split(dirs, ":") {
+ if dir == "" {
+ // "" -> "/usr/share/terminfo"
+ dir = "/usr/share/terminfo"
+ }
+ data, err = ti_try_path(dir)
+ if err == nil {
+ return data, nil
+ }
+ }
+ }
+
+ // fall back to /usr/share/terminfo
+ return ti_try_path("/usr/share/terminfo")
+}
+
+func ti_try_path(path string) (data []byte, err error) {
+ // load_terminfo already made sure it is set
+ term := os.Getenv("TERM")
+
+ // first try, the typical *nix path
+ terminfo := path + "/" + term[0:1] + "/" + term
+ data, err = ioutil.ReadFile(terminfo)
+ if err == nil {
+ return
+ }
+
+ // fallback to darwin specific dirs structure
+ terminfo = path + "/" + hex.EncodeToString([]byte(term[:1])) + "/" + term
+ data, err = ioutil.ReadFile(terminfo)
+ return
+}
+
+func setup_term_builtin() error {
+ name := os.Getenv("TERM")
+ if name == "" {
+ return errors.New("termbox: TERM environment variable not set")
+ }
+
+ for _, t := range terms {
+ if t.name == name {
+ keys = t.keys
+ funcs = t.funcs
+ return nil
+ }
+ }
+
+ compat_table := []struct {
+ partial string
+ keys []string
+ funcs []string
+ }{
+ {"xterm", xterm_keys, xterm_funcs},
+ {"rxvt", rxvt_unicode_keys, rxvt_unicode_funcs},
+ {"linux", linux_keys, linux_funcs},
+ {"Eterm", eterm_keys, eterm_funcs},
+ {"screen", screen_keys, screen_funcs},
+ // let's assume that 'cygwin' is xterm compatible
+ {"cygwin", xterm_keys, xterm_funcs},
+ {"st", xterm_keys, xterm_funcs},
+ }
+
+ // try compatibility variants
+ for _, it := range compat_table {
+ if strings.Contains(name, it.partial) {
+ keys = it.keys
+ funcs = it.funcs
+ return nil
+ }
+ }
+
+ return errors.New("termbox: unsupported terminal")
+}
+
+func setup_term() (err error) {
+ var data []byte
+ var header [6]int16
+ var str_offset, table_offset int16
+
+ data, err = load_terminfo()
+ if err != nil {
+ return setup_term_builtin()
+ }
+
+ rd := bytes.NewReader(data)
+ // 0: magic number, 1: size of names section, 2: size of boolean section, 3:
+ // size of numbers section (in integers), 4: size of the strings section (in
+ // integers), 5: size of the string table
+
+ err = binary.Read(rd, binary.LittleEndian, header[:])
+ if err != nil {
+ return
+ }
+
+ if (header[1]+header[2])%2 != 0 {
+ // old quirk to align everything on word boundaries
+ header[2] += 1
+ }
+ str_offset = ti_header_length + header[1] + header[2] + 2*header[3]
+ table_offset = str_offset + 2*header[4]
+
+ keys = make([]string, 0xFFFF-key_min)
+ for i := range keys {
+ keys[i], err = ti_read_string(rd, str_offset+2*ti_keys[i], table_offset)
+ if err != nil {
+ return
+ }
+ }
+ funcs = make([]string, t_max_funcs)
+ // the last two entries are reserved for mouse. because the table offset is
+ // not there, the two entries have to be filled in manually
+ for i := range funcs[:len(funcs)-2] {
+ funcs[i], err = ti_read_string(rd, str_offset+2*ti_funcs[i], table_offset)
+ if err != nil {
+ return
+ }
+ }
+ funcs[t_max_funcs-2] = ti_mouse_enter
+ funcs[t_max_funcs-1] = ti_mouse_leave
+ return nil
+}
+
+func ti_read_string(rd *bytes.Reader, str_off, table int16) (string, error) {
+ var off int16
+
+ _, err := rd.Seek(int64(str_off), 0)
+ if err != nil {
+ return "", err
+ }
+ err = binary.Read(rd, binary.LittleEndian, &off)
+ if err != nil {
+ return "", err
+ }
+ _, err = rd.Seek(int64(table+off), 0)
+ if err != nil {
+ return "", err
+ }
+ var bs []byte
+ for {
+ b, err := rd.ReadByte()
+ if err != nil {
+ return "", err
+ }
+ if b == byte(0x00) {
+ break
+ }
+ bs = append(bs, b)
+ }
+ return string(bs), nil
+}
+
+// "Maps" the function constants from termbox.go to the number of the respective
+// string capability in the terminfo file. Taken from (ncurses) term.h.
+var ti_funcs = []int16{
+ 28, 40, 16, 13, 5, 39, 36, 27, 26, 34, 89, 88,
+}
+
+// Same as above for the special keys.
+var ti_keys = []int16{
+ 66, 68 /* apparently not a typo; 67 is F10 for whatever reason */, 69, 70,
+ 71, 72, 73, 74, 75, 67, 216, 217, 77, 59, 76, 164, 82, 81, 87, 61, 79, 83,
+}
diff --git a/vendor/github.com/nsf/termbox-go/terminfo_builtin.go b/vendor/github.com/nsf/termbox-go/terminfo_builtin.go
new file mode 100644
index 0000000..a948660
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/terminfo_builtin.go
@@ -0,0 +1,64 @@
+// +build !windows
+
+package termbox
+
+// Eterm
+var eterm_keys = []string{
+ "\x1b[11~", "\x1b[12~", "\x1b[13~", "\x1b[14~", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[7~", "\x1b[8~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C",
+}
+var eterm_funcs = []string{
+ "\x1b7\x1b[?47h", "\x1b[2J\x1b[?47l\x1b8", "\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b[m\x0f", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "", "", "", "",
+}
+
+// screen
+var screen_keys = []string{
+ "\x1bOP", "\x1bOQ", "\x1bOR", "\x1bOS", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[1~", "\x1b[4~", "\x1b[5~", "\x1b[6~", "\x1bOA", "\x1bOB", "\x1bOD", "\x1bOC",
+}
+var screen_funcs = []string{
+ "\x1b[?1049h", "\x1b[?1049l", "\x1b[34h\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[J", "\x1b[m\x0f", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b[?1h\x1b=", "\x1b[?1l\x1b>", ti_mouse_enter, ti_mouse_leave,
+}
+
+// xterm
+var xterm_keys = []string{
+ "\x1bOP", "\x1bOQ", "\x1bOR", "\x1bOS", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1bOH", "\x1bOF", "\x1b[5~", "\x1b[6~", "\x1bOA", "\x1bOB", "\x1bOD", "\x1bOC",
+}
+var xterm_funcs = []string{
+ "\x1b[?1049h", "\x1b[?1049l", "\x1b[?12l\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b(B\x1b[m", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b[?1h\x1b=", "\x1b[?1l\x1b>", ti_mouse_enter, ti_mouse_leave,
+}
+
+// rxvt-unicode
+var rxvt_unicode_keys = []string{
+ "\x1b[11~", "\x1b[12~", "\x1b[13~", "\x1b[14~", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[7~", "\x1b[8~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C",
+}
+var rxvt_unicode_funcs = []string{
+ "\x1b[?1049h", "\x1b[r\x1b[?1049l", "\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b[m\x1b(B", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b=", "\x1b>", ti_mouse_enter, ti_mouse_leave,
+}
+
+// linux
+var linux_keys = []string{
+ "\x1b[[A", "\x1b[[B", "\x1b[[C", "\x1b[[D", "\x1b[[E", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[1~", "\x1b[4~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C",
+}
+var linux_funcs = []string{
+ "", "", "\x1b[?25h\x1b[?0c", "\x1b[?25l\x1b[?1c", "\x1b[H\x1b[J", "\x1b[0;10m", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "", "", "", "",
+}
+
+// rxvt-256color
+var rxvt_256color_keys = []string{
+ "\x1b[11~", "\x1b[12~", "\x1b[13~", "\x1b[14~", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[7~", "\x1b[8~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C",
+}
+var rxvt_256color_funcs = []string{
+ "\x1b7\x1b[?47h", "\x1b[2J\x1b[?47l\x1b8", "\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b[m\x0f", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b=", "\x1b>", ti_mouse_enter, ti_mouse_leave,
+}
+
+var terms = []struct {
+ name string
+ keys []string
+ funcs []string
+}{
+ {"Eterm", eterm_keys, eterm_funcs},
+ {"screen", screen_keys, screen_funcs},
+ {"xterm", xterm_keys, xterm_funcs},
+ {"rxvt-unicode", rxvt_unicode_keys, rxvt_unicode_funcs},
+ {"linux", linux_keys, linux_funcs},
+ {"rxvt-256color", rxvt_256color_keys, rxvt_256color_funcs},
+}
diff --git a/vendor/github.com/skratchdot/open-golang/LICENSE-MIT b/vendor/github.com/skratchdot/open-golang/LICENSE-MIT
new file mode 100644
index 0000000..afd04c8
--- /dev/null
+++ b/vendor/github.com/skratchdot/open-golang/LICENSE-MIT
@@ -0,0 +1,22 @@
+Copyright (c) 2013 skratchdot
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/skratchdot/open-golang/open/exec.go b/vendor/github.com/skratchdot/open-golang/open/exec.go
new file mode 100644
index 0000000..1b0e713
--- /dev/null
+++ b/vendor/github.com/skratchdot/open-golang/open/exec.go
@@ -0,0 +1,18 @@
+// +build !windows,!darwin
+
+package open
+
+import (
+ "os/exec"
+)
+
+// http://sources.debian.net/src/xdg-utils/1.1.0~rc1%2Bgit20111210-7.1/scripts/xdg-open/
+// http://sources.debian.net/src/xdg-utils/1.1.0~rc1%2Bgit20111210-7.1/scripts/xdg-mime/
+
+func open(input string) *exec.Cmd {
+ return exec.Command("xdg-open", input)
+}
+
+func openWith(input string, appName string) *exec.Cmd {
+ return exec.Command(appName, input)
+}
diff --git a/vendor/github.com/skratchdot/open-golang/open/exec_darwin.go b/vendor/github.com/skratchdot/open-golang/open/exec_darwin.go
new file mode 100644
index 0000000..16160e6
--- /dev/null
+++ b/vendor/github.com/skratchdot/open-golang/open/exec_darwin.go
@@ -0,0 +1,15 @@
+// +build darwin
+
+package open
+
+import (
+ "os/exec"
+)
+
+func open(input string) *exec.Cmd {
+ return exec.Command("open", input)
+}
+
+func openWith(input string, appName string) *exec.Cmd {
+ return exec.Command("open", "-a", appName, input)
+}
diff --git a/vendor/github.com/skratchdot/open-golang/open/exec_windows.go b/vendor/github.com/skratchdot/open-golang/open/exec_windows.go
new file mode 100644
index 0000000..2d93617
--- /dev/null
+++ b/vendor/github.com/skratchdot/open-golang/open/exec_windows.go
@@ -0,0 +1,28 @@
+// +build windows
+
+package open
+
+import (
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+var (
+ cmd = "url.dll,FileProtocolHandler"
+ runDll32 = filepath.Join(os.Getenv("SYSTEMROOT"), "System32", "rundll32.exe")
+)
+
+func cleaninput(input string) string {
+ r := strings.NewReplacer("&", "^&")
+ return r.Replace(input)
+}
+
+func open(input string) *exec.Cmd {
+ return exec.Command(runDll32, cmd, input)
+}
+
+func openWith(input string, appName string) *exec.Cmd {
+ return exec.Command("cmd", "/C", "start", "", appName, cleaninput(input))
+}
diff --git a/vendor/github.com/skratchdot/open-golang/open/open.go b/vendor/github.com/skratchdot/open-golang/open/open.go
new file mode 100644
index 0000000..b1f648f
--- /dev/null
+++ b/vendor/github.com/skratchdot/open-golang/open/open.go
@@ -0,0 +1,50 @@
+/*
+
+ Open a file, directory, or URI using the OS's default
+ application for that object type. Optionally, you can
+ specify an application to use.
+
+ This is a proxy for the following commands:
+
+ OSX: "open"
+ Windows: "start"
+ Linux/Other: "xdg-open"
+
+ This is a golang port of the node.js module: https://github.com/pwnall/node-open
+
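+ A minimal usage sketch (illustrative only; the URL and application name
+ below are assumptions, not values defined by this package):
+
+     // open an example URL with the default handler and wait for the command
+     if err := open.Run("https://example.com"); err != nil {
+         // handle the error
+     }
+
+     // open a file with a named application (app name is illustrative),
+     // without waiting for the command to finish
+     _ = open.StartWith("notes.txt", "TextEdit")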
+*/
+package open
+
+/*
+ Open a file, directory, or URI using the OS's default
+ application for that object type. Wait for the open
+ command to complete.
+*/
+func Run(input string) error {
+ return open(input).Run()
+}
+
+/*
+ Open a file, directory, or URI using the OS's default
+ application for that object type. Don't wait for the
+ open command to complete.
+*/
+func Start(input string) error {
+ return open(input).Start()
+}
+
+/*
+ Open a file, directory, or URI using the specified application.
+ Wait for the open command to complete.
+*/
+func RunWith(input string, appName string) error {
+ return openWith(input, appName).Run()
+}
+
+/*
+ Open a file, directory, or URI using the specified application.
+ Don't wait for the open command to complete.
+*/
+func StartWith(input string, appName string) error {
+ return openWith(input, appName).Start()
+}
diff --git a/vendor/github.com/yesnault/hashtag/LICENSE b/vendor/github.com/yesnault/hashtag/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/vendor/github.com/yesnault/hashtag/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/yesnault/hashtag/README.md b/vendor/github.com/yesnault/hashtag/README.md
new file mode 100644
index 0000000..6219a44
--- /dev/null
+++ b/vendor/github.com/yesnault/hashtag/README.md
@@ -0,0 +1,39 @@
+# hashtag
+Package hashtag implements extraction of Twitter-style hashtags, mentions and
+replies from text in Go. This package partially ports extraction routines from
+[Twitter's official Java package](https://github.com/twitter/twitter-text)
+to Go and runs most of the standard twitter-text conformance tests. It does not
+yet implement URL extraction (and hence URL/hashtag overlaps), cashtags or lists.
+
+Since the package attempts to closely follow the Twitter-Text Java API, function
+names may be longer than typical Go package function names.
+
+Installation
+------------
+Note: As the package matures, I plan to move this to gopkg.in
+```
+go get github.com/srinathh/hashtag
+```
+
+Usage
+-----
+Import the package as
+```
+import "github.com/srinathh/hashtag"
+```
+
+This package supports the following functions to extract hashtags and mentions,
+with or without position markers (a short usage sketch follows the list).
+The functions omit the # and @ markers, and their full-width counterparts
+U+FF03 and U+FF20, from the returned values:
+- `ExtractHashtags(text string) []string`
+- `ExtractHashtagsWithIndices(text string) []Entity`
+- `ExtractMentions(text string) []string`
+- `ExtractMentionsWithIndices(text string) []Entity`
+- `ExtractReply(text string) string`
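+
+A minimal usage sketch (the sample text and the values in the comments are
+illustrative, not part of the package API):
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/srinathh/hashtag"
+)
+
+func main() {
+	// sample text (illustrative)
+	text := "Going to the #beach with @alice tomorrow #sunny"
+
+	fmt.Println(hashtag.ExtractHashtags(text)) // [beach sunny]
+	fmt.Println(hashtag.ExtractMentions(text)) // [alice]
+
+	// Entity carries the byte offsets of the extracted value, marker excluded.
+	for _, e := range hashtag.ExtractHashtagsWithIndices(text) {
+		fmt.Printf("%s [%d:%d]\n", e.Value, e.Start, e.End)
+	}
+}
+```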
+
+Documentation
+-------------
+Read the full documentation and examples on [GoDoc](http://godoc.org/github.com/srinathh/hashtag)
+
+
+
diff --git a/vendor/github.com/yesnault/hashtag/hashtag.go b/vendor/github.com/yesnault/hashtag/hashtag.go
new file mode 100644
index 0000000..cd88317
--- /dev/null
+++ b/vendor/github.com/yesnault/hashtag/hashtag.go
@@ -0,0 +1,201 @@
+/*
+ Copyright 2014 Hariharan Srinath
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+Package hashtag implements extraction of Twitter-style hashtags, mentions and
+replies from text in Go. This package partially ports extraction routines from
+Twitter's official Java package at https://github.com/twitter/twitter-text
+to Go and runs most of the standard twitter-text conformance tests. It does not
+yet implement URL extraction (and hence URL/hashtag overlaps), cashtags or lists.
+
+Since the package attempts to closely follow the Twitter-Text Java API, function
+names may be longer than typical Go package function names.
+*/
+package hashtag
+
+import (
+ "regexp"
+ "strings"
+)
+
+const (
+ unicodeSpaces = "[" +
+ "\\x{0009}-\\x{000d}" + // # White_Space # Cc [5] ..
+ "\\x{0020}" + // White_Space # Zs SPACE
+ "\\x{0085}" + // White_Space # Cc
+ "\\x{00a0}" + // White_Space # Zs NO-BREAK SPACE
+ "\\x{1680}" + // White_Space # Zs OGHAM SPACE MARK
+ "\\x{180E}" + // White_Space # Zs MONGOLIAN VOWEL SEPARATOR
+ "\\x{2000}-\\x{200a}" + // # White_Space # Zs [11] EN QUAD..HAIR SPACE
+ "\\x{2028}" + // White_Space # Zl LINE SEPARATOR
+ "\\x{2029}" + // White_Space # Zp PARAGRAPH SEPARATOR
+ "\\x{202F}" + // White_Space # Zs NARROW NO-BREAK SPACE
+ "\\x{205F}" + // White_Space # Zs MEDIUM MATHEMATICAL SPACE
+ "\\x{3000}" + // White_Space # Zs IDEOGRAPHIC SPACE
+ "]"
+
+ hashtagLetters = "\\pL\\pM"
+ hashtagNumerals = "\\p{Nd}"
+ hashtagSpecialChars = "/" + "\\." + "_" + "\\-" + "\\:" + "\\+" + "@" + "?" + "&" + "~" + "=" + "," +
+ "\\x{200c}" + // ZERO WIDTH NON-JOINER (ZWNJ)
+ "\\x{200d}" + // ZERO WIDTH JOINER (ZWJ)
+ "\\x{a67e}" + // CYRILLIC KAVYKA
+ "\\x{05be}" + // HEBREW PUNCTUATION MAQAF
+ "\\x{05f3}" + // HEBREW PUNCTUATION GERESH
+ "\\x{05f4}" + // HEBREW PUNCTUATION GERSHAYIM
+ "\\x{309b}" + // KATAKANA-HIRAGANA VOICED SOUND MARK
+ "\\x{309c}" + // KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK
+ "\\x{30a0}" + // KATAKANA-HIRAGANA DOUBLE HYPHEN
+ "\\x{30fb}" + // KATAKANA MIDDLE DOT
+ "\\x{3003}" + // DITTO MARK
+ "\\x{0f0b}" + // TIBETAN MARK INTERSYLLABIC TSHEG
+ "\\x{0f0c}" + // TIBETAN MARK DELIMITER TSHEG BSTAR
+ "\\x{0f0d}" // TIBETAN MARK SHAD
+
+ hashtagLettersNumerals = hashtagLetters + hashtagNumerals + hashtagSpecialChars
+ hashtagLettersNumeralsSet = "[" + hashtagLettersNumerals + "]"
+ hashtagLettersSet = "[:" + hashtagLetters + "]"
+
+ atSignsChars = "@\\x{FF20}"
+ atSigns = "[" + atSignsChars + "]"
+
+ latinAccentsChars = "\\x{00c0}-\\x{00d6}\\x{00d8}-\\x{00f6}\\x{00f8}-\\x{00ff}" + // Latin-1
+ "\\x{0100}-\\x{024f}" + // Latin Extended A and B
+ "\\x{0253}\\x{0254}\\x{0256}\\x{0257}\\x{0259}\\x{025b}\\x{0263}\\x{0268}\\x{026f}\\x{0272}\\x{0289}\\x{028b}" + // IPA Extensions
+ "\\x{02bb}" + // Hawaiian
+ "\\x{0300}-\\x{036f}" + // Combining diacritics
+ "\\x{1e00}-\\x{1eff}" // Latin Extended Additional (mostly for Vietnamese)
+)
+
+var validMention = regexp.MustCompile("([^A-Za-z0-9_!#$%&*" + atSignsChars + "]|^|[Rr][tT]:?)(" + atSigns + "+)([A-Za-z0-9_\\.\\-]{1,20})")
+
+var invalidMentionMatchEnd = regexp.MustCompile("^(?:[" + atSignsChars + latinAccentsChars + "]|://)")
+
+var validHashtag = regexp.MustCompile("(?m)(?:^|[^&" + hashtagLettersNumerals + "])(?:#|\\x{FF03})(" +
+ hashtagLettersNumeralsSet + "*" + hashtagLettersSet + hashtagLettersNumeralsSet +
+ "*)")
+
+var invalidHashtagMatchEnd = regexp.MustCompile("^(?:[#\\x{FF03}])")
+
+var validReply = regexp.MustCompile("^(?:" + unicodeSpaces + ")*" + atSigns + "([A-Za-z0-9_]{1,20})")
+
+/*
+Entity is used by ExtractXXXXWithIndices functions to return the position
+and text extracted. This may be expanded in the future to support List slugs
+*/
+type Entity struct {
+ Start int
+ End int
+ Value string
+}
+
+/*
+ExtractHashtags extracts hashtags without the hash markers from input
+text and returns them as a slice of strings.
+*/
+func ExtractHashtags(text string) []string {
+ entities := ExtractHashtagsWithIndices(text)
+ ret := make([]string, len(entities))
+
+ for j, entity := range entities {
+ ret[j] = entity.Value
+ }
+ return ret
+}
+
+/*
+ExtractHashtagsWithIndices extracts hashtags without the hash markers from
+input text and returns them as a slice of Entities containing start/end positions.
+*/
+func ExtractHashtagsWithIndices(text string) []Entity {
+ if len(text) == 0 || !strings.ContainsAny(text, "#\uFF03") {
+ return []Entity{}
+ }
+
+ matches := validHashtag.FindAllStringSubmatchIndex(text, -1)
+ entities := []Entity{}
+
+ for _, match := range matches {
+ if !invalidHashtagMatchEnd.MatchString(text[match[1]:]) {
+ value := text[match[2]:match[3]]
+ /*if strings.Contains(value, "://") {
+ continue
+ }*/
+ entities = append(entities, Entity{
+ Start: match[2],
+ End: match[3],
+ Value: value,
+ })
+ }
+ }
+ return entities
+}
+
+/*
+ExtractMentionsWithIndices extracts mentions without the @ markers from
+input text and returns them as a slice of Entities containing start/end positions.
+*/
+func ExtractMentionsWithIndices(text string) []Entity {
+	if len(text) == 0 || !strings.ContainsAny(text, "@\uFF20") {
+ return []Entity{}
+ }
+
+ matches := validMention.FindAllStringSubmatchIndex(text, -1)
+ entities := []Entity{}
+ for _, match := range matches {
+ if !invalidMentionMatchEnd.MatchString(text[match[1]:]) {
+ entities = append(entities, Entity{
+ Start: match[6],
+ End: match[7],
+ Value: text[match[6]:match[7]],
+ })
+ }
+ }
+ return entities
+}
+
+/*
+ExtractMentions extracts mentions without the @ markers from
+input text and returns them as a slice of strings.
+*/
+func ExtractMentions(text string) []string {
+ entities := ExtractMentionsWithIndices(text)
+ ret := make([]string, len(entities))
+
+ for j, ent := range entities {
+ ret[j] = ent.Value
+ }
+ return ret
+}
+
+/*
+ExtractReply extracts reply username without the
+@ marker from input text and returns it as a string.
+Empty string signals no reply username
+*/
+func ExtractReply(text string) string {
+	if len(text) == 0 || !strings.ContainsAny(text, "@\uFF20") {
+ return ""
+ }
+
+ matches := validReply.FindAllStringSubmatchIndex(text, -1)
+ for _, match := range matches {
+ if !invalidMentionMatchEnd.MatchString(text[match[1]:]) {
+ return text[match[2]:match[3]]
+ }
+ }
+ return ""
+}
diff --git a/vendor/github.com/yesnault/hashtag/tests.yml b/vendor/github.com/yesnault/hashtag/tests.yml
new file mode 100644
index 0000000..a16365f
--- /dev/null
+++ b/vendor/github.com/yesnault/hashtag/tests.yml
@@ -0,0 +1,475 @@
+mentions:
+ - description: "Test fastpath exit without mentions"
+ text: "no mentions in this #text"
+ expected: []
+
+  - description: "Extract mention at the beginning of a tweet"
+ text: "@username reply"
+ expected: ["username"]
+
+ - description: "Extract mention at the end of a tweet"
+ text: "mention @username"
+ expected: ["username"]
+
+ - description: "Extract mention in the middle of a tweet"
+ text: "mention @username in the middle"
+ expected: ["username"]
+
+ - description: "Extract mention of username with underscore"
+ text: "mention @user_name"
+ expected: ["user_name"]
+
+ - description: "Extract mention of all numeric username"
+ text: "mention @12345"
+ expected: ["12345"]
+
+ - description: "Extract mention with dot"
+ text: "mention @aaa.bbb"
+ expected: ["aaa.bbb"]
+
+ - description: "Extract mention with -"
+ text: "mention @aaa-bbb"
+ expected: ["aaa-bbb"]
+
+  - description: "Extract mentions of multiple usernames"
+ text: "mention @username1 @username2"
+ expected: ["username1", "username2"]
+
+ - description: "Extract mention in the middle of a Japanese tweet"
+ text: "の@usernameに到着を待っている"
+ expected: ["username"]
+
+ - description: "DO NOT extract username ending in @"
+ text: "Current Status: @_@ (cc: @username)"
+ expected: ["username"]
+
+ - description: "DO NOT extract username followed by accented latin characters"
+ text: "@aliceìnheiro something something"
+ expected: []
+
+  - description: "Extract lone mention but not @user@user (too close to an email)"
+ text: "@username email me @test@example.com"
+ expected: ["username"]
+
+ - description: "DO NOT extract 'http' in '@http://' as username"
+ text: "@http://twitter.com"
+ expected: []
+
+ - description: "Extract mentions before newline"
+ text: "@username\n@mention"
+ expected: ["username", "mention"]
+
+ - description: "Extract mentions after 'RT'"
+ text: "RT@username RT:@mention RT @test"
+ expected: ["username", "mention", "test"]
+
+ - description: "Extract mentions after 'rt'"
+ text: "rt@username rt:@mention rt @test"
+ expected: ["username", "mention", "test"]
+
+ - description: "Extract mentions after 'Rt'"
+ text: "Rt@username Rt:@mention Rt @test"
+ expected: ["username", "mention", "test"]
+
+ - description: "Extract mentions after 'rT'"
+ text: "rT@username rT:@mention rT @test"
+ expected: ["username", "mention", "test"]
+
+ - description: "DO NOT extract username preceded by !"
+ text: "f!@kn"
+ expected: []
+
+ - description: "DO NOT extract username preceded by @"
+ text: "f@@kn"
+ expected: []
+
+ - description: "DO NOT extract username preceded by #"
+ text: "f#@kn"
+ expected: []
+
+ - description: "DO NOT extract username preceded by $"
+ text: "f$@kn"
+ expected: []
+
+ - description: "DO NOT extract username preceded by %"
+ text: "f%@kn"
+ expected: []
+
+ - description: "DO NOT extract username preceded by &"
+ text: "f&@kn"
+ expected: []
+
+ - description: "DO NOT extract username preceded by *"
+ text: "f*@kn"
+ expected: []
+
+ - description: "Extract a mention at the start"
+ text: "@username yo!"
+ expected: ["username"]
+
+ - description: "Extract a mention that has the same thing mentioned at the start"
+ text: "username @username"
+ expected: ["username"]
+
+ - description: "Extract a mention in the middle of a Japanese tweet"
+ text: "の@usernameに到着を待っている"
+ expected: ["username"]
+
+replies:
+ - description: "Test fastpath exit without replies"
+ text: "no replies in this #text"
+ expected: []
+
+  - description: "Extract reply at the beginning of a tweet"
+ text: "@username reply"
+ expected: ["username"]
+
+ - description: "Extract reply preceded by only a space"
+ text: " @username reply"
+ expected: ["username"]
+
+ - description: "Extract reply preceded by only a full-width space (U+3000)"
+ text: " @username reply"
+ expected: ["username"]
+
+ - description: "DO NOT Extract reply when preceded by text"
+ text: "a @username mention, not a reply"
+ expected: []
+
+ - description: "DO NOT Extract reply when preceded by ."
+ text: ".@username mention, not a reply"
+ expected: []
+
+ - description: "DO NOT Extract reply when preceded by /"
+ text: "/@username mention, not a reply"
+ expected: []
+
+ - description: "DO NOT Extract reply when preceded by _"
+ text: "_@username mention, not a reply"
+ expected: []
+
+ - description: "DO NOT Extract reply when preceded by -"
+ text: "-@username mention, not a reply"
+ expected: []
+
+ - description: "DO NOT Extract reply when preceded by +"
+ text: "+@username mention, not a reply"
+ expected: []
+
+ - description: "DO NOT Extract reply when preceded by #"
+ text: "#@username mention, not a reply"
+ expected: []
+
+ - description: "DO NOT Extract reply when preceded by !"
+ text: "!@username mention, not a reply"
+ expected: []
+
+ - description: "DO NOT Extract reply when preceded by @"
+ text: "@@username mention, not a reply"
+ expected: []
+
+ - description: "DO NOT Extract reply when followed by URL"
+ text: "@http://twitter.com"
+ expected: []
+
+hashtags:
+ - description: "Test fastpath exit without hashtags"
+ text: "nothing like @hashtags in this text"
+ expected: []
+
+ - description: "Extract an all-alpha hashtag"
+ text: "a #hashtag here"
+ expected: ["hashtag"]
+
+ - description: "Extract a letter-then-number hashtag"
+ text: "this is #hashtag1"
+ expected: ["hashtag1"]
+
+ - description: "Extract a number-then-letter hashtag"
+ text: "#1hashtag is this"
+ expected: ["1hashtag"]
+
+ - description: "DO NOT Extract an all-numeric hashtag"
+ text: "On the #16 bus"
+ expected: []
+
+ - description: "DO NOT Extract a single numeric hashtag"
+ text: "#0"
+ expected: []
+
+ - description: "Extract hashtag after bracket"
+ text: "(#hashtag1 )#hashtag2 [#hashtag3 ]#hashtag4 ’#hashtag5’#hashtag6"
+ expected: ["hashtag1", "hashtag2", "hashtag3", "hashtag4", "hashtag5", "hashtag6"]
+
+ - description: "Extract a hashtag containing ñ"
+ text: "I'll write more tests #mañana"
+ expected: ["mañana"]
+
+ - description: "Extract a hashtag containing é"
+ text: "Working remotely #café"
+ expected: ["café"]
+
+ - description: "Extract a hashtag containing ü"
+ text: "Getting my Oktoberfest on #münchen"
+ expected: ["münchen"]
+
+ - description: "DO NOT Extract a hashtag containing Japanese"
+ text: "this is not valid: # 会議中 ハッシュ"
+ expected: []
+
+ - description: "Extract a hashtag in Korean"
+ text: "What is #트위터 anyway?"
+ expected: ["트위터"]
+
+ - description: "Extract a half-width Hangul hashtag"
+ text: "Just random half-width Hangul #ᆪᆭᄚ"
+ expected: ["ᆪᆭᄚ"]
+
+ - description: "Extract a hashtag in Russian"
+ text: "What is #ашок anyway?"
+ expected: ["ашок"]
+
+ - description: "Extract a starting katakana hashtag"
+ text: "#カタカナ is a hashtag"
+ expected: ["カタカナ"]
+
+ - description: "Extract a starting hiragana hashtag"
+ text: "#ひらがな FTW!"
+ expected: ["ひらがな"]
+
+ - description: "Extract a starting kanji hashtag"
+ text: "#漢字 is the future"
+ expected: ["漢字"]
+
+ - description: "Extract a trailing katakana hashtag"
+ text: "Hashtag #カタカナ"
+ expected: ["カタカナ"]
+
+ - description: "Extract a trailing hiragana hashtag"
+ text: "Japanese hashtags #ひらがな"
+ expected: ["ひらがな"]
+
+ - description: "Extract a trailing kanji hashtag"
+ text: "Study time #漢字"
+ expected: ["漢字"]
+
+ - description: "Extract a central katakana hashtag"
+ text: "See my #カタカナ hashtag?"
+ expected: ["カタカナ"]
+
+ - description: "Extract a central hiragana hashtag"
+ text: "Study #ひらがな for fun and profit"
+ expected: ["ひらがな"]
+
+ - description: "Extract a central kanji hashtag"
+ text: "Some say #漢字 is the past. what do they know?"
+ expected: ["漢字"]
+
+ - description: "Extract a Kanji/Katakana mixed hashtag"
+ text: "日本語ハッシュタグテスト #日本語ハッシュタグ"
+ expected: ["日本語ハッシュタグ"]
+
+ - description: "Extract a hashtag after a punctuation"
+ text: "日本語ハッシュテスト。#日本語ハッシュタグ"
+ expected: ["日本語ハッシュタグ"]
+
+ - description: "DO NOT include a punctuation in a hashtag"
+ text: "#日本語ハッシュタグ。"
+ expected: ["日本語ハッシュタグ"]
+
+ - description: "Extract a full-width Alnum hashtag"
+ text: "全角英数字ハッシュタグ #hashtag123"
+ expected: ["hashtag123"]
+
+ - description: "DO NOT extract a hashtag without a preceding space"
+ text: "日本語ハッシュタグ#日本語ハッシュタグ"
+ expected: []
+
+ - description: "Hashtag with chouon"
+ text: "長音ハッシュタグ。#サッカー"
+ expected: ["サッカー"]
+
+ - description: "Hashtag with half-width chouon"
+ text: "長音ハッシュタグ。#サッカー"
+ expected: ["サッカー"]
+
+  - description: "Hashtag with half-width voiced sound marks"
+ text: "#ハッシュタグ #パピプペポ"
+ expected: ["ハッシュタグ", "パピプペポ"]
+
+ - description: "Hashtag with half-width # after full-width !"
+ text: "できましたよー!#日本語ハッシュタグ。"
+ expected: ["日本語ハッシュタグ"]
+
+ - description: "Hashtag with full-width # after full-width !"
+ text: "できましたよー!#日本語ハッシュタグ。"
+ expected: ["日本語ハッシュタグ"]
+
+ - description: "Hashtag with ideographic iteration mark"
+ text: "#云々 #学問のすゝめ #いすゞ #各〻 #各〃"
+ expected: ["云々", "学問のすゝめ", "いすゞ", "各〻", "各〃"]
+
+ - description: "Hashtags with ş (U+015F)"
+ text: "Here’s a test tweet for you: #Ateş #qrşt #ştu #ş"
+ expected: ["Ateş", "qrşt", "ştu", "ş"]
+
+ - description: "Hashtags with İ (U+0130) and ı (U+0131)"
+ text: "Here’s a test tweet for you: #İn #ın"
+ expected: ["İn", "ın"]
+
+ - description: "Hashtag before punctuations, except:"
+ text: "#hashtag: #hashtag; #hashtag, #hashtag. #hashtag!"
+ expected: ["hashtag:", "hashtag", "hashtag,", "hashtag.", "hashtag"]
+
+ - description: "Hashtag with dot:"
+ text: "#hashtag.dot"
+ expected: ["hashtag.dot"]
+
+ - description: "Hashtag with /"
+ text: "#From_dev/aa #From_aa/ff"
+ expected: ["From_dev/aa", "From_aa/ff"]
+
+ - description: "Hashtag with:"
+ text: "#From:dev/aa #From:aa/ff"
+ expected: ["From:dev/aa", "From:aa/ff"]
+
+ - description: "Hashtag with ?:"
+ text: "#From:dev?aa #From:aa?ff"
+ expected: ["From:dev?aa", "From:aa?ff"]
+
+ - description: "Hashtag with =:"
+ text: "#From:dev=aa #From:aa=ff"
+ expected: ["From:dev=aa", "From:aa=ff"]
+
+ - description: "Hashtag with -:"
+ text: "#From:dev-aa #From:aa-ff"
+ expected: ["From:dev-aa", "From:aa-ff"]
+
+ - description: "Hashtag with ~:"
+ text: "#From:dev~aa #From:aa~ff"
+ expected: ["From:dev~aa", "From:aa~ff"]
+
+ - description: "Hashtag with ,:"
+ text: "#From:dev,aa #From:aa,ff"
+ expected: ["From:dev,aa", "From:aa,ff"]
+
+# - description: "Hashtag after punctuations"
+# text: ":#hashtaga ;#hashtagb ,#hashtagc .#hashtagd ?#hashtagf"
+# expected: ["hashtaga", "hashtagb", "hashtagc", "hashtagd", "hashtagf"]
+
+ - description: "Hashtag with http://"
+ text: "#hop:http://hashtaga"
+ expected: ["hop:http://hashtaga"]
+
+ - description: "Hashtag with :/"
+ text: "#http:/hashtaga"
+ expected: ["http:/hashtaga"]
+
+ - description: "Hashtag with http:aa"
+ text: "#http:aa"
+ expected: ["http:aa"]
+
+ - description: "Hashtag before newline"
+ text: "#hashtag\ntest\n#hashtag2\ntest\n#hashtag3\n"
+ expected: ["hashtag", "hashtag2", "hashtag3"]
+
+ - description: "Extract hashtag when # is followed by URL"
+ text: "#http://twitter.com #https://twitter.com"
+ expected: ["http://twitter.com", "https://twitter.com"]
+
+# Checking for overlap of hashtag with URL is not implemented currently
+# - description: "DO NOT extract hashtag if it's a part of URL"
+# text: "http://twitter.com/#hashtag twitter.com/#hashtag"
+# expected: []
+
+ - description: "Extract hashtags with Latin extended characters"
+ text: "#Azərbaycanca #mûǁae #Čeština #Ċaoiṁín"
+ expected: ["Azərbaycanca", "mûǁae", "Čeština", "Ċaoiṁín"]
+
+ - description: "Extract Arabic hashtags"
+ text: "#سیاست #ایران #السياسة #السياح #لغات #اتمی #کنفرانس #العربية #الجزيرة #فارسی"
+ expected: ["سیاست", "ایران", "السياسة", "السياح", "لغات", "اتمی", "کنفرانس", "العربية", "الجزيرة", "فارسی"]
+
+ - description: "Extract Hebrew hashtags"
+ text: "#עַל־יְדֵי #וכו׳ #מ״כ"
+ expected: ["עַל־יְדֵי", "וכו׳", "מ״כ"]
+
+ - description: "Extract Thai hashtags"
+ text: "#ผู้เริ่ม #การเมือง #รายละเอียด #นักท่องเที่ยว #ของขวัญ #สนามบิน #เดินทาง #ประธาน"
+ expected: ["ผู้เริ่ม", "การเมือง", "รายละเอียด", "นักท่องเที่ยว", "ของขวัญ", "สนามบิน", "เดินทาง", "ประธาน"]
+
+ - description: "Extract Arabic hashtags with Zero-Width Non-Joiner"
+ text: "#أيبيإم #میخواهم"
+ expected: ["أيبيإم", "میخواهم"]
+
+ - description: "Extract Amharic hashtag"
+ text: "የአላህ መልእክተኛ ሰለላሁ ዓለይሂ ወሰለም #ኢትዮሙስሊምስ"
+ expected: ["ኢትዮሙስሊምስ"]
+
+ - description: "Extract Sinhala hashtag with Zero-Width Joiner (U+200D)"
+ text: "#ශ්රීලංකා"
+ expected: ["ශ්රීලංකා"]
+
+ - description: "Extract Arabic and Persian hashtags with numbers"
+ text: "#۳۴۵هشتگ #هشتگ۶۷۸ #ســـلام_عليكم_٤٠٦"
+ expected: ["۳۴۵هشتگ","هشتگ۶۷۸","ســـلام_عليكم_٤٠٦"]
+
+ - description: "Extract Hindi hashtags"
+ text: "#महात्मा #महात्मा_१२३४ #१२३४ गांधी"
+ expected: ["महात्मा","महात्मा_१२३४"]
+
+ - description: "Extract Indic script hashtags"
+ text: "#বাংলা #ગુજરાતી #ಕನ್ನಡ #മലയാളം #ଓଡ଼ିଆ #ਪੰਜਾਬੀ #සිංහල #தமிழ் #తెలుగు"
+ expected: ["বাংলা","ગુજરાતી","ಕನ್ನಡ","മലയാളം","ଓଡ଼ିଆ","ਪੰਜਾਬੀ","සිංහල","தமிழ்","తెలుగు"]
+
+ - description: "Extract Tibetan hashtags"
+ text: "#བོད་སྐད་ #བོད་སྐད།"
+ expected: ["བོད་སྐད་","བོད་སྐད།"]
+
+ - description: "Extract Khmer, Burmese, Laotian hashtags"
+ text: "#មហាត្មះគន្ធី #မြင့်မြတ်သော #ຊີວະສາດ"
+ expected: ["មហាត្មះគន្ធី","မြင့်မြတ်သော","ຊີວະສາດ"]
+
+ - description: "Extract Greek hashtag"
+ text: "#Μαχάτμα_Γκάντι ήταν Ινδός πολιτικός"
+ expected: ["Μαχάτμα_Γκάντι"]
+
+ - description: "Extract Armenian and Georgian hashtags"
+ text: "#Մահաթմա #მაჰათმა"
+ expected: ["Մահաթմա","მაჰათმა"]
+
+ - description: "DO NOT extract hashtags without a letter"
+ text: "#_ #1_2 #122 #〃"
+ expected: []
+
+  - description: "Extract a hashtag at the start"
+ text: "#hashtag here"
+ expected: ["hashtag"]
+
+  - description: "Extract a hashtag at the end"
+ text: "test a #hashtag"
+ expected: ["hashtag"]
+
+  - description: "Extract a hashtag in the middle"
+ text: "test a #hashtag in a string"
+ expected: ["hashtag"]
+
+ - description: "Extract only a valid hashtag"
+ text: "#123 a #hashtag in a string"
+ expected: ["hashtag"]
+
+ - description: "Extract a hashtag in a string of multi-byte characters"
+ text: "会議中 #hashtag 会議中"
+ expected: ["hashtag"]
+
+ - description: "Extract multiple valid hashtags"
+ text: "One #two three #four"
+ expected: ["two", "four"]
+
+ - description: "Extract a non-latin hashtag"
+ text: "Hashtags in #русский!"
+ expected: ["русский"]
+
+ - description: "Extract multiple non-latin hashtags"
+ text: "Hashtags in #中文, #日本語, #한국말, and #русский! Try it out!"
+ expected: ["中文,", "日本語,", "한국말,", "русский"]
diff --git a/vendor/gopkg.in/bsm/ratelimit.v1/Makefile b/vendor/gopkg.in/bsm/ratelimit.v1/Makefile
new file mode 100644
index 0000000..aa713cc
--- /dev/null
+++ b/vendor/gopkg.in/bsm/ratelimit.v1/Makefile
@@ -0,0 +1,16 @@
+default: test
+
+testdeps:
+ @go get github.com/onsi/ginkgo
+ @go get github.com/onsi/gomega
+
+test: testdeps
+ @go test ./...
+
+testrace: testdeps
+ @go test ./... -race
+
+testall: test testrace
+
+bench:
+ @go test ./... -run=NONE -bench=.
diff --git a/vendor/gopkg.in/bsm/ratelimit.v1/README.md b/vendor/gopkg.in/bsm/ratelimit.v1/README.md
new file mode 100644
index 0000000..bf48e94
--- /dev/null
+++ b/vendor/gopkg.in/bsm/ratelimit.v1/README.md
@@ -0,0 +1,54 @@
+# RateLimit [![Build Status](https://travis-ci.org/bsm/ratelimit.png?branch=master)](https://travis-ci.org/bsm/ratelimit)
+
+Simple, thread-safe Go rate-limiter.
+Inspired by Antti Huima's algorithm on http://stackoverflow.com/a/668327
+
+### Example
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/bsm/ratelimit"
+)
+
+func main() {
+	// Create a new rate-limiter, allowing up to 10 calls
+	// per second
+	rl := ratelimit.New(10, time.Second)
+
+	for i := 0; i < 20; i++ {
+		if rl.Limit() {
+			fmt.Println("DOH! Over limit!")
+		} else {
+			fmt.Println("OK")
+		}
+	}
+}
+```
+
+### Licence
+
+```
+Copyright (c) 2015 Black Square Media
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+```
diff --git a/vendor/gopkg.in/bsm/ratelimit.v1/ratelimit.go b/vendor/gopkg.in/bsm/ratelimit.v1/ratelimit.go
new file mode 100644
index 0000000..d006009
--- /dev/null
+++ b/vendor/gopkg.in/bsm/ratelimit.v1/ratelimit.go
@@ -0,0 +1,96 @@
+/*
+Simple, thread-safe Go rate-limiter.
+Inspired by Antti Huima's algorithm on http://stackoverflow.com/a/668327
+
+Example:
+
+	// Create a new rate-limiter, allowing up to 10 calls
+ // per second
+ rl := ratelimit.New(10, time.Second)
+
+ for i:=0; i<20; i++ {
+ if rl.Limit() {
+ fmt.Println("DOH! Over limit!")
+ } else {
+ fmt.Println("OK")
+ }
+ }
+*/
+package ratelimit
+
+import (
+ "sync/atomic"
+ "time"
+)
+
+// RateLimiter instances are thread-safe.
+type RateLimiter struct {
+ rate, allowance, max, unit, lastCheck uint64
+}
+
+// New creates a new rate limiter instance
+func New(rate int, per time.Duration) *RateLimiter {
+ nano := uint64(per)
+ if nano < 1 {
+ nano = uint64(time.Second)
+ }
+ if rate < 1 {
+ rate = 1
+ }
+
+ return &RateLimiter{
+ rate: uint64(rate), // store the rate
+ allowance: uint64(rate) * nano, // set our allowance to max in the beginning
+ max: uint64(rate) * nano, // remember our maximum allowance
+ unit: nano, // remember our unit size
+
+ lastCheck: unixNano(),
+ }
+}
+
+// UpdateRate updates the allowed rate.
+func (rl *RateLimiter) UpdateRate(rate int) {
+ atomic.StoreUint64(&rl.rate, uint64(rate))
+ atomic.StoreUint64(&rl.max, uint64(rate)*rl.unit)
+}
+
+// Limit returns true if rate was exceeded
+func (rl *RateLimiter) Limit() bool {
+ // Calculate the number of ns that have passed since our last call
+ now := unixNano()
+ passed := now - atomic.SwapUint64(&rl.lastCheck, now)
+
+ // Add them to our allowance
+ rate := atomic.LoadUint64(&rl.rate)
+ current := atomic.AddUint64(&rl.allowance, passed*rate)
+
+ // Ensure our allowance is not over maximum
+ if max := atomic.LoadUint64(&rl.max); current > max {
+ atomic.AddUint64(&rl.allowance, max-current)
+ current = max
+ }
+
+ // If our allowance is less than one unit, rate-limit!
+ if current < rl.unit {
+ return true
+ }
+
+ // Not limited, subtract a unit
+ atomic.AddUint64(&rl.allowance, -rl.unit)
+ return false
+}
+
+// Undo reverts the last Limit() call, returning consumed allowance
+func (rl *RateLimiter) Undo() {
+ current := atomic.AddUint64(&rl.allowance, rl.unit)
+
+ // Ensure our allowance is not over maximum
+ if max := atomic.LoadUint64(&rl.max); current > max {
+ atomic.AddUint64(&rl.allowance, max-current)
+ }
+}
+
+// now as unix nanoseconds
+func unixNano() uint64 {
+ return uint64(time.Now().UnixNano())
+}
diff --git a/vendor/gopkg.in/mgo.v2/LICENSE b/vendor/gopkg.in/mgo.v2/LICENSE
new file mode 100644
index 0000000..770c767
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/LICENSE
@@ -0,0 +1,25 @@
+mgo - MongoDB driver for Go
+
+Copyright (c) 2010-2013 - Gustavo Niemeyer
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/mgo.v2/Makefile b/vendor/gopkg.in/mgo.v2/Makefile
new file mode 100644
index 0000000..d1027d4
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/Makefile
@@ -0,0 +1,5 @@
+startdb:
+ @harness/setup.sh start
+
+stopdb:
+ @harness/setup.sh stop
diff --git a/vendor/gopkg.in/mgo.v2/README.md b/vendor/gopkg.in/mgo.v2/README.md
new file mode 100644
index 0000000..f4e452c
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/README.md
@@ -0,0 +1,4 @@
+The MongoDB driver for Go
+-------------------------
+
+Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details.
diff --git a/vendor/gopkg.in/mgo.v2/auth.go b/vendor/gopkg.in/mgo.v2/auth.go
new file mode 100644
index 0000000..dc26e52
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/auth.go
@@ -0,0 +1,467 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "sync"
+
+ "gopkg.in/mgo.v2/bson"
+ "gopkg.in/mgo.v2/internal/scram"
+)
+
+type authCmd struct {
+ Authenticate int
+
+ Nonce string
+ User string
+ Key string
+}
+
+type startSaslCmd struct {
+ StartSASL int `bson:"startSasl"`
+}
+
+type authResult struct {
+ ErrMsg string
+ Ok bool
+}
+
+type getNonceCmd struct {
+ GetNonce int
+}
+
+type getNonceResult struct {
+ Nonce string
+ Err string "$err"
+ Code int
+}
+
+type logoutCmd struct {
+ Logout int
+}
+
+type saslCmd struct {
+ Start int `bson:"saslStart,omitempty"`
+ Continue int `bson:"saslContinue,omitempty"`
+ ConversationId int `bson:"conversationId,omitempty"`
+ Mechanism string `bson:"mechanism,omitempty"`
+ Payload []byte
+}
+
+type saslResult struct {
+ Ok bool `bson:"ok"`
+ NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?)
+ Done bool
+
+ ConversationId int `bson:"conversationId"`
+ Payload []byte
+ ErrMsg string
+}
+
+type saslStepper interface {
+ Step(serverData []byte) (clientData []byte, done bool, err error)
+ Close()
+}
+
+func (socket *mongoSocket) getNonce() (nonce string, err error) {
+ socket.Lock()
+ for socket.cachedNonce == "" && socket.dead == nil {
+ debugf("Socket %p to %s: waiting for nonce", socket, socket.addr)
+ socket.gotNonce.Wait()
+ }
+ if socket.cachedNonce == "mongos" {
+ socket.Unlock()
+ return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth")
+ }
+ debugf("Socket %p to %s: got nonce", socket, socket.addr)
+ nonce, err = socket.cachedNonce, socket.dead
+ socket.cachedNonce = ""
+ socket.Unlock()
+ if err != nil {
+ nonce = ""
+ }
+ return
+}
+
+func (socket *mongoSocket) resetNonce() {
+ debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr)
+ op := &queryOp{}
+ op.query = &getNonceCmd{GetNonce: 1}
+ op.collection = "admin.$cmd"
+ op.limit = -1
+ op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
+ if err != nil {
+ socket.kill(errors.New("getNonce: "+err.Error()), true)
+ return
+ }
+ result := &getNonceResult{}
+ err = bson.Unmarshal(docData, &result)
+ if err != nil {
+ socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true)
+ return
+ }
+ debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result)
+ if result.Code == 13390 {
+ // mongos doesn't yet support auth (see http://j.mp/mongos-auth)
+ result.Nonce = "mongos"
+ } else if result.Nonce == "" {
+ var msg string
+ if result.Err != "" {
+ msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code)
+ } else {
+ msg = "Got an empty nonce"
+ }
+ socket.kill(errors.New(msg), true)
+ return
+ }
+ socket.Lock()
+ if socket.cachedNonce != "" {
+ socket.Unlock()
+ panic("resetNonce: nonce already cached")
+ }
+ socket.cachedNonce = result.Nonce
+ socket.gotNonce.Signal()
+ socket.Unlock()
+ }
+ err := socket.Query(op)
+ if err != nil {
+ socket.kill(errors.New("resetNonce: "+err.Error()), true)
+ }
+}
+
+func (socket *mongoSocket) Login(cred Credential) error {
+ socket.Lock()
+ if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 {
+ cred.Mechanism = "SCRAM-SHA-1"
+ }
+ for _, sockCred := range socket.creds {
+ if sockCred == cred {
+ debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username)
+ socket.Unlock()
+ return nil
+ }
+ }
+ if socket.dropLogout(cred) {
+ debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username)
+ socket.creds = append(socket.creds, cred)
+ socket.Unlock()
+ return nil
+ }
+ socket.Unlock()
+
+ debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username)
+
+ var err error
+ switch cred.Mechanism {
+ case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501.
+ err = socket.loginClassic(cred)
+ case "PLAIN":
+ err = socket.loginPlain(cred)
+ case "MONGODB-X509":
+ err = socket.loginX509(cred)
+ default:
+ // Try SASL for everything else, if it is available.
+ err = socket.loginSASL(cred)
+ }
+
+ if err != nil {
+ debugf("Socket %p to %s: login error: %s", socket, socket.addr, err)
+ } else {
+ debugf("Socket %p to %s: login successful", socket, socket.addr)
+ }
+ return err
+}
+
+func (socket *mongoSocket) loginClassic(cred Credential) error {
+ // Note that this only works properly because this function is
+ // synchronous, which means the nonce won't get reset while we're
+ // using it and any other login requests will block waiting for a
+ // new nonce provided in the defer call below.
+ nonce, err := socket.getNonce()
+ if err != nil {
+ return err
+ }
+ defer socket.resetNonce()
+
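+ // MONGODB-CR key derivation, as implemented below:
+ //   key = md5hex(nonce + username + md5hex(username + ":mongo:" + password))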
+ psum := md5.New()
+ psum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
+
+ ksum := md5.New()
+ ksum.Write([]byte(nonce + cred.Username))
+ ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil))))
+
+ key := hex.EncodeToString(ksum.Sum(nil))
+
+ cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key}
+ res := authResult{}
+ return socket.loginRun(cred.Source, &cmd, &res, func() error {
+ if !res.Ok {
+ return errors.New(res.ErrMsg)
+ }
+ socket.Lock()
+ socket.dropAuth(cred.Source)
+ socket.creds = append(socket.creds, cred)
+ socket.Unlock()
+ return nil
+ })
+}
+
+type authX509Cmd struct {
+ Authenticate int
+ User string
+ Mechanism string
+}
+
+func (socket *mongoSocket) loginX509(cred Credential) error {
+ cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"}
+ res := authResult{}
+ return socket.loginRun(cred.Source, &cmd, &res, func() error {
+ if !res.Ok {
+ return errors.New(res.ErrMsg)
+ }
+ socket.Lock()
+ socket.dropAuth(cred.Source)
+ socket.creds = append(socket.creds, cred)
+ socket.Unlock()
+ return nil
+ })
+}
+
+func (socket *mongoSocket) loginPlain(cred Credential) error {
+ cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)}
+ res := authResult{}
+ return socket.loginRun(cred.Source, &cmd, &res, func() error {
+ if !res.Ok {
+ return errors.New(res.ErrMsg)
+ }
+ socket.Lock()
+ socket.dropAuth(cred.Source)
+ socket.creds = append(socket.creds, cred)
+ socket.Unlock()
+ return nil
+ })
+}
+
+func (socket *mongoSocket) loginSASL(cred Credential) error {
+ var sasl saslStepper
+ var err error
+ if cred.Mechanism == "SCRAM-SHA-1" {
+ // SCRAM is handled without external libraries.
+ sasl = saslNewScram(cred)
+ } else if len(cred.ServiceHost) > 0 {
+ sasl, err = saslNew(cred, cred.ServiceHost)
+ } else {
+ sasl, err = saslNew(cred, socket.Server().Addr)
+ }
+ if err != nil {
+ return err
+ }
+ defer sasl.Close()
+
+ // The goal of this logic is to carry a locked socket until the
+ // local SASL step confirms the auth is valid; the socket needs to be
+ // locked so that concurrent action doesn't leave the socket in an
+ // auth state that doesn't reflect the operations that took place.
+ // As a simple case, imagine inverting login=>logout to logout=>login.
+ //
+ // The logic below works because the lock func isn't called concurrently.
+ locked := false
+ lock := func(b bool) {
+ if locked != b {
+ locked = b
+ if b {
+ socket.Lock()
+ } else {
+ socket.Unlock()
+ }
+ }
+ }
+
+ lock(true)
+ defer lock(false)
+
+ start := 1
+ cmd := saslCmd{}
+ res := saslResult{}
+ for {
+ payload, done, err := sasl.Step(res.Payload)
+ if err != nil {
+ return err
+ }
+ if done && res.Done {
+ socket.dropAuth(cred.Source)
+ socket.creds = append(socket.creds, cred)
+ break
+ }
+ lock(false)
+
+ cmd = saslCmd{
+ Start: start,
+ Continue: 1 - start,
+ ConversationId: res.ConversationId,
+ Mechanism: cred.Mechanism,
+ Payload: payload,
+ }
+ start = 0
+ err = socket.loginRun(cred.Source, &cmd, &res, func() error {
+ // See the comment on lock for why this is necessary.
+ lock(true)
+ if !res.Ok || res.NotOk {
+ return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ if done && res.Done {
+ socket.dropAuth(cred.Source)
+ socket.creds = append(socket.creds, cred)
+ break
+ }
+ }
+
+ return nil
+}
+
+func saslNewScram(cred Credential) *saslScram {
+ credsum := md5.New()
+ credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
+ client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil)))
+ return &saslScram{cred: cred, client: client}
+}
+
+type saslScram struct {
+ cred Credential
+ client *scram.Client
+}
+
+func (s *saslScram) Close() {}
+
+func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) {
+ more := s.client.Step(serverData)
+ return s.client.Out(), !more, s.client.Err()
+}
+
+func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error {
+ var mutex sync.Mutex
+ var replyErr error
+ mutex.Lock()
+
+ op := queryOp{}
+ op.query = query
+ op.collection = db + ".$cmd"
+ op.limit = -1
+ op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
+ defer mutex.Unlock()
+
+ if err != nil {
+ replyErr = err
+ return
+ }
+
+ err = bson.Unmarshal(docData, result)
+ if err != nil {
+ replyErr = err
+ } else {
+ // Must handle this within the read loop for the socket, so
+ // that concurrent login requests are properly ordered.
+ replyErr = f()
+ }
+ }
+
+ err := socket.Query(&op)
+ if err != nil {
+ return err
+ }
+ mutex.Lock() // Wait.
+ return replyErr
+}
+
+func (socket *mongoSocket) Logout(db string) {
+ socket.Lock()
+ cred, found := socket.dropAuth(db)
+ if found {
+ debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db)
+ socket.logout = append(socket.logout, cred)
+ }
+ socket.Unlock()
+}
+
+func (socket *mongoSocket) LogoutAll() {
+ socket.Lock()
+ if l := len(socket.creds); l > 0 {
+ debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l)
+ socket.logout = append(socket.logout, socket.creds...)
+ socket.creds = socket.creds[0:0]
+ }
+ socket.Unlock()
+}
+
+func (socket *mongoSocket) flushLogout() (ops []interface{}) {
+ socket.Lock()
+ if l := len(socket.logout); l > 0 {
+ debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l)
+ for i := 0; i != l; i++ {
+ op := queryOp{}
+ op.query = &logoutCmd{1}
+ op.collection = socket.logout[i].Source + ".$cmd"
+ op.limit = -1
+ ops = append(ops, &op)
+ }
+ socket.logout = socket.logout[0:0]
+ }
+ socket.Unlock()
+ return
+}
+
+func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) {
+ for i, sockCred := range socket.creds {
+ if sockCred.Source == db {
+ copy(socket.creds[i:], socket.creds[i+1:])
+ socket.creds = socket.creds[:len(socket.creds)-1]
+ return sockCred, true
+ }
+ }
+ return cred, false
+}
+
+func (socket *mongoSocket) dropLogout(cred Credential) (found bool) {
+ for i, sockCred := range socket.logout {
+ if sockCred == cred {
+ copy(socket.logout[i:], socket.logout[i+1:])
+ socket.logout = socket.logout[:len(socket.logout)-1]
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/gopkg.in/mgo.v2/bson/LICENSE b/vendor/gopkg.in/mgo.v2/bson/LICENSE
new file mode 100644
index 0000000..8903260
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/bson/LICENSE
@@ -0,0 +1,25 @@
+BSON library for Go
+
+Copyright (c) 2010-2012 - Gustavo Niemeyer
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/mgo.v2/bson/bson.go b/vendor/gopkg.in/mgo.v2/bson/bson.go
new file mode 100644
index 0000000..7fb7f8c
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/bson/bson.go
@@ -0,0 +1,738 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package bson is an implementation of the BSON specification for Go:
+//
+// http://bsonspec.org
+//
+// It was created as part of the mgo MongoDB driver for Go, but is standalone
+// and may be used on its own without the driver.
+package bson
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/rand"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// --------------------------------------------------------------------------
+// The public API.
+
+// A value implementing the bson.Getter interface will have its GetBSON
+// method called when the given value has to be marshalled, and the result
+// of this method will be marshaled in place of the actual object.
+//
+// If GetBSON returns a non-nil error, the marshalling procedure
+// will stop and error out with the provided value.
+type Getter interface {
+ GetBSON() (interface{}, error)
+}
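+
+// As a hedged illustration (not part of the original documentation), a type
+// that stores a time internally but should be marshalled as a plain Unix
+// timestamp could implement Getter roughly as follows (Stamp is a
+// hypothetical type):
+//
+//	type Stamp struct{ t time.Time }
+//
+//	func (s Stamp) GetBSON() (interface{}, error) {
+//		return s.t.Unix(), nil
+//	}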
+
+// A value implementing the bson.Setter interface will receive the BSON
+// value via the SetBSON method during unmarshaling, and the object
+// itself will not be changed as usual.
+//
+// If setting the value works, the method should return nil or alternatively
+// bson.SetZero to set the respective field to its zero value (nil for
+// pointer types). If SetBSON returns a value of type bson.TypeError, the
+// BSON value will be omitted from a map or slice being decoded and the
+// unmarshalling will continue. If it returns any other non-nil error, the
+// unmarshalling procedure will stop and error out with the provided value.
+//
+// This interface is generally useful in pointer receivers, since the method
+// will want to change the receiver. A type field that implements the Setter
+// interface doesn't have to be a pointer, though.
+//
+// Unlike the usual behavior, unmarshalling onto a value that implements a
+// Setter interface will NOT reset the value to its zero state. This allows
+// the value to decide by itself how to be unmarshalled.
+//
+// For example:
+//
+// type MyString string
+//
+// func (s *MyString) SetBSON(raw bson.Raw) error {
+// return raw.Unmarshal(s)
+// }
+//
+type Setter interface {
+ SetBSON(raw Raw) error
+}
+
+// SetZero may be returned from a SetBSON method to have the value set to
+// its respective zero value. When used in pointer values, this will set the
+// field to nil rather than to the pre-allocated value.
+var SetZero = errors.New("set to zero")
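+
+// As a sketch of how SetZero can be combined with the Setter interface shown
+// above (illustrative only; OptionalString is a hypothetical type), a setter
+// may return SetZero for an empty value so a pointer field holding it is set
+// to nil:
+//
+//	type OptionalString string
+//
+//	func (s *OptionalString) SetBSON(raw bson.Raw) error {
+//		var v string
+//		if err := raw.Unmarshal(&v); err != nil {
+//			return err
+//		}
+//		if v == "" {
+//			return bson.SetZero
+//		}
+//		*s = OptionalString(v)
+//		return nil
+//	}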
+
+// M is a convenient alias for a map[string]interface{} map, useful for
+// dealing with BSON in a native way. For instance:
+//
+// bson.M{"a": 1, "b": true}
+//
+// There's no special handling for this type in addition to what's done anyway
+// for an equivalent map type. Elements in the map will be dumped in an
+// undefined order. See also the bson.D type for an ordered alternative.
+type M map[string]interface{}
+
+// D represents a BSON document containing ordered elements. For example:
+//
+// bson.D{{"a", 1}, {"b", true}}
+//
+// In some situations, such as when creating indexes for MongoDB, the order in
+// which the elements are defined is important. If the order is not important,
+// using a map is generally more comfortable. See bson.M and bson.RawD.
+type D []DocElem
+
+// DocElem is an element of the bson.D document representation.
+type DocElem struct {
+ Name string
+ Value interface{}
+}
+
+// Map returns a map out of the ordered element name/value pairs in d.
+func (d D) Map() (m M) {
+ m = make(M, len(d))
+ for _, item := range d {
+ m[item.Name] = item.Value
+ }
+ return m
+}
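+
+// For instance (an illustrative sketch; the field names are hypothetical),
+// an ordered index specification and its unordered map form:
+//
+//	idx := bson.D{{"user_id", 1}, {"created", -1}}
+//	m := idx.Map() // bson.M{"user_id": 1, "created": -1}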
+
+// The Raw type represents raw unprocessed BSON documents and elements.
+// Kind is the kind of element as defined per the BSON specification, and
+// Data is the raw unprocessed data for the respective element.
+// Using this type it is possible to unmarshal or marshal values partially.
+//
+// Relevant documentation:
+//
+// http://bsonspec.org/#/specification
+//
+type Raw struct {
+ Kind byte
+ Data []byte
+}
+
+// RawD represents a BSON document containing raw unprocessed elements.
+// This low-level representation may be useful when lazily processing
+// documents of uncertain content, or when manipulating the raw content
+// documents in general.
+type RawD []RawDocElem
+
+// See the RawD type.
+type RawDocElem struct {
+ Name string
+ Value Raw
+}
+
+// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
+// long. MongoDB objects by default have such a property set in their "_id"
+// property.
+//
+// http://www.mongodb.org/display/DOCS/Object+IDs
+type ObjectId string
+
+// ObjectIdHex returns an ObjectId from the provided hex representation.
+// Calling this function with an invalid hex representation will
+// cause a runtime panic. See the IsObjectIdHex function.
+func ObjectIdHex(s string) ObjectId {
+ d, err := hex.DecodeString(s)
+ if err != nil || len(d) != 12 {
+ panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s))
+ }
+ return ObjectId(d)
+}
+
+// IsObjectIdHex returns whether s is a valid hex representation of
+// an ObjectId. See the ObjectIdHex function.
+func IsObjectIdHex(s string) bool {
+ if len(s) != 24 {
+ return false
+ }
+ _, err := hex.DecodeString(s)
+ return err == nil
+}
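+
+// A minimal sketch of how these two functions are meant to be paired (not
+// part of the original documentation): validating untrusted input first
+// keeps ObjectIdHex from panicking.
+//
+//	func parseId(s string) (bson.ObjectId, bool) {
+//		if !bson.IsObjectIdHex(s) {
+//			return "", false
+//		}
+//		return bson.ObjectIdHex(s), true
+//	}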
+
+// objectIdCounter is atomically incremented when generating a new ObjectId
+// using NewObjectId() function. It's used as a counter part of an id.
+var objectIdCounter uint32 = readRandomUint32()
+
+// readRandomUint32 returns a random objectIdCounter.
+func readRandomUint32() uint32 {
+ var b [4]byte
+ _, err := io.ReadFull(rand.Reader, b[:])
+ if err != nil {
+ panic(fmt.Errorf("cannot read random object id: %v", err))
+ }
+ return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24))
+}
+
+// machineId stores machine id generated once and used in subsequent calls
+// to NewObjectId function.
+var machineId = readMachineId()
+var processId = os.Getpid()
+
+// readMachineId generates and returns a machine id.
+// If the hostname cannot be obtained it falls back to random bytes, and
+// panics only if reading random data fails as well.
+func readMachineId() []byte {
+ var sum [3]byte
+ id := sum[:]
+ hostname, err1 := os.Hostname()
+ if err1 != nil {
+ _, err2 := io.ReadFull(rand.Reader, id)
+ if err2 != nil {
+ panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
+ }
+ return id
+ }
+ hw := md5.New()
+ hw.Write([]byte(hostname))
+ copy(id, hw.Sum(nil))
+ return id
+}
+
+// NewObjectId returns a new unique ObjectId.
+func NewObjectId() ObjectId {
+ var b [12]byte
+ // Timestamp, 4 bytes, big endian
+ binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
+ // Machine, first 3 bytes of md5(hostname)
+ b[4] = machineId[0]
+ b[5] = machineId[1]
+ b[6] = machineId[2]
+ // Pid, 2 bytes, specs don't specify endianness, but we use big endian.
+ b[7] = byte(processId >> 8)
+ b[8] = byte(processId)
+ // Increment, 3 bytes, big endian
+ i := atomic.AddUint32(&objectIdCounter, 1)
+ b[9] = byte(i >> 16)
+ b[10] = byte(i >> 8)
+ b[11] = byte(i)
+ return ObjectId(b[:])
+}
+
+// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
+// with the provided number of seconds from epoch UTC, and all other parts
+// filled with zeroes. It's not safe to insert a document with an id generated
+// by this method; it is useful only for queries to find documents with ids
+// generated before or after the specified timestamp.
+func NewObjectIdWithTime(t time.Time) ObjectId {
+ var b [12]byte
+ binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
+ return ObjectId(string(b[:]))
+}
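+
+// A hedged usage sketch: selecting documents whose ids were generated within
+// the last hour. The "_id" key and the "$gte" operator are standard MongoDB
+// query syntax rather than part of this package:
+//
+//	since := bson.NewObjectIdWithTime(time.Now().Add(-time.Hour))
+//	query := bson.M{"_id": bson.M{"$gte": since}}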
+
+// String returns a hex string representation of the id.
+// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
+func (id ObjectId) String() string {
+ return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
+}
+
+// Hex returns a hex representation of the ObjectId.
+func (id ObjectId) Hex() string {
+ return hex.EncodeToString([]byte(id))
+}
+
+// MarshalJSON turns a bson.ObjectId into a json.Marshaler.
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+ return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
+}
+
+var nullBytes = []byte("null")
+
+// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaler.
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+ if len(data) > 0 && (data[0] == '{' || data[0] == 'O') {
+ var v struct {
+ Id json.RawMessage `json:"$oid"`
+ Func struct {
+ Id json.RawMessage
+ } `json:"$oidFunc"`
+ }
+ err := jdec(data, &v)
+ if err == nil {
+ if len(v.Id) > 0 {
+ data = []byte(v.Id)
+ } else {
+ data = []byte(v.Func.Id)
+ }
+ }
+ }
+ if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
+ *id = ""
+ return nil
+ }
+ if len(data) != 26 || data[0] != '"' || data[25] != '"' {
+ return fmt.Errorf("invalid ObjectId in JSON: %s", string(data))
+ }
+ var buf [12]byte
+ _, err := hex.Decode(buf[:], data[1:25])
+ if err != nil {
+ return fmt.Errorf("invalid ObjectId in JSON: %s (%s)", string(data), err)
+ }
+ *id = ObjectId(string(buf[:]))
+ return nil
+}
+
+// MarshalText turns bson.ObjectId into an encoding.TextMarshaler.
+func (id ObjectId) MarshalText() ([]byte, error) {
+ return []byte(fmt.Sprintf("%x", string(id))), nil
+}
+
+// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler.
+func (id *ObjectId) UnmarshalText(data []byte) error {
+ if len(data) == 1 && data[0] == ' ' || len(data) == 0 {
+ *id = ""
+ return nil
+ }
+ if len(data) != 24 {
+ return fmt.Errorf("invalid ObjectId: %s", data)
+ }
+ var buf [12]byte
+ _, err := hex.Decode(buf[:], data[:])
+ if err != nil {
+ return fmt.Errorf("invalid ObjectId: %s (%s)", data, err)
+ }
+ *id = ObjectId(string(buf[:]))
+ return nil
+}
+
+// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
+func (id ObjectId) Valid() bool {
+ return len(id) == 12
+}
+
+// byteSlice returns byte slice of id from start to end.
+// Calling this function with an invalid id will cause a runtime panic.
+func (id ObjectId) byteSlice(start, end int) []byte {
+ if len(id) != 12 {
+ panic(fmt.Sprintf("invalid ObjectId: %q", string(id)))
+ }
+ return []byte(string(id)[start:end])
+}
+
+// Time returns the timestamp part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Time() time.Time {
+ // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
+ secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
+ return time.Unix(secs, 0)
+}
+
+// Machine returns the 3-byte machine id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Machine() []byte {
+ return id.byteSlice(4, 7)
+}
+
+// Pid returns the process id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Pid() uint16 {
+ return binary.BigEndian.Uint16(id.byteSlice(7, 9))
+}
+
+// Counter returns the incrementing value part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Counter() int32 {
+ b := id.byteSlice(9, 12)
+ // Counter is stored as big-endian 3-byte value
+ return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
+}
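+
+// Taken together, these accessors recover the fields packed by NewObjectId.
+// An illustrative sketch:
+//
+//	id := bson.NewObjectId()
+//	fmt.Println(id.Time(), id.Machine(), id.Pid(), id.Counter())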
+
+// The Symbol type is similar to a string and is used in languages with a
+// distinct symbol type.
+type Symbol string
+
+// Now returns the current time with millisecond precision. MongoDB stores
+// timestamps with the same precision, so a Time returned from this method
+// will not change after a roundtrip to the database. That's the only reason
+// why this function exists. Using the time.Now function also works fine
+// otherwise.
+func Now() time.Time {
+ return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
+}
+
+// MongoTimestamp is a special internal type used by MongoDB that for some
+// strange reason has its own datatype defined in BSON.
+type MongoTimestamp int64
+
+type orderKey int64
+
+// MaxKey is a special value that compares higher than all other possible BSON
+// values in a MongoDB database.
+var MaxKey = orderKey(1<<63 - 1)
+
+// MinKey is a special value that compares lower than all other possible BSON
+// values in a MongoDB database.
+var MinKey = orderKey(-1 << 63)
+
+type undefined struct{}
+
+// Undefined represents the undefined BSON value.
+var Undefined undefined
+
+// Binary is a representation for non-standard binary values. Any kind should
+// work, but the following are known as of this writing:
+//
+// 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
+// 0x01 - Function (!?)
+// 0x02 - Obsolete generic.
+// 0x03 - UUID
+// 0x05 - MD5
+// 0x80 - User defined.
+//
+type Binary struct {
+ Kind byte
+ Data []byte
+}
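+
+// An illustrative sketch (uuidBytes is a hypothetical 16-byte slice): storing
+// a UUID under the explicit kind listed above instead of as generic binary:
+//
+//	b := bson.Binary{Kind: 0x03, Data: uuidBytes}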
+
+// RegEx represents a regular expression. The Options field may contain
+// individual characters defining the way in which the pattern should be
+// applied, and must be sorted. Valid options as of this writing are 'i' for
+// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
+// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
+// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
+// unicode. The value of the Options parameter is not verified before being
+// marshaled into the BSON format.
+type RegEx struct {
+ Pattern string
+ Options string
+}
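+
+// For example (an illustrative sketch; the "name" field is hypothetical), a
+// case-insensitive prefix match in a MongoDB query:
+//
+//	q := bson.M{"name": bson.RegEx{Pattern: "^ann", Options: "i"}}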
+
+// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
+// will be marshaled as a mapping from identifiers to values that may be
+// used when evaluating the provided Code.
+type JavaScript struct {
+ Code string
+ Scope interface{}
+}
+
+// DBPointer refers to a document id in a namespace.
+//
+// This type is deprecated in the BSON specification and should not be used
+// except for backwards compatibility with ancient applications.
+type DBPointer struct {
+ Namespace string
+ Id ObjectId
+}
+
+const initialBufferSize = 64
+
+func handleErr(err *error) {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ } else if _, ok := r.(externalPanic); ok {
+ panic(r)
+ } else if s, ok := r.(string); ok {
+ *err = errors.New(s)
+ } else if e, ok := r.(error); ok {
+ *err = e
+ } else {
+ panic(r)
+ }
+ }
+}
+
+// Marshal serializes the in value, which may be a map or a struct value.
+// In the case of struct values, only exported fields will be serialized,
+// and the order of serialized fields will match that of the struct itself.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the marshalling behavior for
+// the field. The tag formats accepted are:
+//
+// "[][,[,]]"
+//
+// `(...) bson:"[][,[,]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+//
+// minsize Marshal an int64 value as an int32, if that's feasible
+// while preserving the numeric value.
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the bson keys of other struct fields.
+//
+// Some examples:
+//
+// type T struct {
+// A bool
+// B int "myb"
+// C string "myc,omitempty"
+// D string `bson:",omitempty" json:"jsonkey"`
+// E int64 ",minsize"
+// F int64 "myf,omitempty,minsize"
+// }
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := &encoder{make([]byte, 0, initialBufferSize)}
+ e.addDoc(reflect.ValueOf(in))
+ return e.out, nil
+}
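+
+// A short usage sketch for the example struct T above (illustrative only):
+//
+//	data, err := bson.Marshal(T{A: true, B: 7, C: "x"})
+//	if err != nil {
+//		panic(err)
+//	}
+//	// data now holds a BSON document whose keys include "a", "myb" and "myc".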
+
+// Unmarshal deserializes data from in into the out value. The out value
+// must be a map, a pointer to a struct, or a pointer to a bson.D value.
+// In the case of struct values, only exported fields will be deserialized.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the marshalling behavior for
+// the field. The tag formats accepted are:
+//
+// "[][,[,]]"
+//
+// `(...) bson:"[][,[,]]" (...)`
+//
+// The following flags are currently supported during unmarshal (see the
+// Marshal method for other flags):
+//
+// inline Inline the field, which must be a struct or a map.
+// Inlined structs are handled as if its fields were part
+// of the outer struct. An inlined map causes keys that do
+// not match any other struct field to be inserted in the
+// map rather than being discarded as usual.
+//
+// The target field or element types of out may not necessarily match
+// the BSON values of the provided data. The following conversions are
+// made automatically:
+//
+// - Numeric types are converted if at least the integer part of the
+// value would be preserved correctly
+// - Bools are converted to numeric types as 1 or 0
+// - Numeric types are converted to bools as true if not 0 or false otherwise
+// - Binary and string BSON data is converted to a string, array or byte slice
+//
+// If the value would not fit the type and cannot be converted, it's
+// silently skipped.
+//
+// Pointer values are initialized when necessary.
+func Unmarshal(in []byte, out interface{}) (err error) {
+ if raw, ok := out.(*Raw); ok {
+ raw.Kind = 3
+ raw.Data = in
+ return nil
+ }
+ defer handleErr(&err)
+ v := reflect.ValueOf(out)
+ switch v.Kind() {
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Map:
+ d := newDecoder(in)
+ d.readDocTo(v)
+ case reflect.Struct:
+ return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
+ default:
+ return errors.New("Unmarshal needs a map or a pointer to a struct.")
+ }
+ return nil
+}
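+
+// A minimal sketch (illustrative; data is assumed to hold a BSON document,
+// e.g. the output of Marshal above): unmarshalling into a struct pointer and
+// into a generic map.
+//
+//	var t T
+//	if err := bson.Unmarshal(data, &t); err != nil {
+//		panic(err)
+//	}
+//	var m bson.M
+//	if err := bson.Unmarshal(data, &m); err != nil {
+//		panic(err)
+//	}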
+
+// Unmarshal deserializes raw into the out value. If the out value type
+// is not compatible with raw, a *bson.TypeError is returned.
+//
+// See the Unmarshal function documentation for more details on the
+// unmarshalling process.
+func (raw Raw) Unmarshal(out interface{}) (err error) {
+ defer handleErr(&err)
+ v := reflect.ValueOf(out)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ fallthrough
+ case reflect.Map:
+ d := newDecoder(raw.Data)
+ good := d.readElemTo(v, raw.Kind)
+ if !good {
+ return &TypeError{v.Type(), raw.Kind}
+ }
+ case reflect.Struct:
+ return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
+ default:
+ return errors.New("Raw Unmarshal needs a map or a valid pointer.")
+ }
+ return nil
+}
+
+type TypeError struct {
+ Type reflect.Type
+ Kind byte
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+ InlineMap int
+ Zero reflect.Value
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ MinSize bool
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var structMapMutex sync.RWMutex
+
+type externalPanic string
+
+func (e externalPanic) String() string {
+ return string(e)
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ structMapMutex.RLock()
+ sinfo, found := structMap[st]
+ structMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("bson")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "minsize":
+ info.MinSize = true
+ case "inline":
+ inline = true
+ default:
+ msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ panic(externalPanic(msg))
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ panic("Option ,inline needs a struct value or map field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+ sinfo = &structInfo{
+ fieldsMap,
+ fieldsList,
+ inlineMap,
+ reflect.New(st).Elem(),
+ }
+ structMapMutex.Lock()
+ structMap[st] = sinfo
+ structMapMutex.Unlock()
+ return sinfo, nil
+}
diff --git a/vendor/gopkg.in/mgo.v2/bson/decimal.go b/vendor/gopkg.in/mgo.v2/bson/decimal.go
new file mode 100644
index 0000000..3d2f700
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/bson/decimal.go
@@ -0,0 +1,310 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package bson
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Decimal128 holds decimal128 BSON values.
+type Decimal128 struct {
+ h, l uint64
+}
+
+func (d Decimal128) String() string {
+ var pos int // positive sign
+ var e int // exponent
+ var h, l uint64 // significand high/low
+
+ if d.h>>63&1 == 0 {
+ pos = 1
+ }
+
+ switch d.h >> 58 & (1<<5 - 1) {
+ case 0x1F:
+ return "NaN"
+ case 0x1E:
+ return "-Inf"[pos:]
+ }
+
+ l = d.l
+ if d.h>>61&3 == 3 {
+ // Bits: 1*sign 2*ignored 14*exponent 111*significand.
+ // Implicit 0b100 prefix in significand.
+ e = int(d.h>>47&(1<<14-1)) - 6176
+ //h = 4<<47 | d.h&(1<<47-1)
+ // Spec says all of these values are out of range.
+ h, l = 0, 0
+ } else {
+ // Bits: 1*sign 14*exponent 113*significand
+ e = int(d.h>>49&(1<<14-1)) - 6176
+ h = d.h & (1<<49 - 1)
+ }
+
+ // Would be handled by the logic below, but that's trivial and common.
+ if h == 0 && l == 0 && e == 0 {
+ return "-0"[pos:]
+ }
+
+ var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
+ var last = len(repr)
+ var i = len(repr)
+ var dot = len(repr) + e
+ var rem uint32
+Loop:
+ for d9 := 0; d9 < 5; d9++ {
+ h, l, rem = divmod(h, l, 1e9)
+ for d1 := 0; d1 < 9; d1++ {
+ // Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
+ if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
+ e += len(repr) - i
+ i--
+ repr[i] = '.'
+ last = i - 1
+ dot = len(repr) // Unmark.
+ }
+ c := '0' + byte(rem%10)
+ rem /= 10
+ i--
+ repr[i] = c
+ // Handle "0E+3", "1E+3", etc.
+ if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
+ last = i
+ break Loop
+ }
+ if c != '0' {
+ last = i
+ }
+ // Break early. Works without it, but why.
+ if dot > i && l == 0 && h == 0 && rem == 0 {
+ break Loop
+ }
+ }
+ }
+ repr[last-1] = '-'
+ last--
+
+ if e > 0 {
+ return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
+ }
+ if e < 0 {
+ return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
+ }
+ return string(repr[last+pos:])
+}
+
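+// divmod divides the 128-bit value (h, l) by div using schoolbook long
+// division over four 32-bit limbs, returning the 128-bit quotient (qh, ql)
+// and the 32-bit remainder.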
+func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
+ div64 := uint64(div)
+ a := h >> 32
+ aq := a / div64
+ ar := a % div64
+ b := ar<<32 + h&(1<<32-1)
+ bq := b / div64
+ br := b % div64
+ c := br<<32 + l>>32
+ cq := c / div64
+ cr := c % div64
+ d := cr<<32 + l&(1<<32-1)
+ dq := d / div64
+ dr := d % div64
+ return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
+}
+
+var dNaN = Decimal128{0x1F << 58, 0}
+var dPosInf = Decimal128{0x1E << 58, 0}
+var dNegInf = Decimal128{0x3E << 58, 0}
+
+func dErr(s string) (Decimal128, error) {
+ return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
+}
+
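+// ParseDecimal128 parses s as a decimal128 value. It accepts an optional
+// sign, a decimal significand (at most 34 significant digits) with an
+// optional decimal point, an optional exponent introduced by 'E' or 'e',
+// and the special values NaN and (signed) Inf/Infinity in any letter case.
+// An error is returned if s cannot be represented as a decimal128.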
+func ParseDecimal128(s string) (Decimal128, error) {
+ orig := s
+ if s == "" {
+ return dErr(orig)
+ }
+ neg := s[0] == '-'
+ if neg || s[0] == '+' {
+ s = s[1:]
+ }
+
+ if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
+ if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
+ return dNaN, nil
+ }
+ if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
+ if neg {
+ return dNegInf, nil
+ }
+ return dPosInf, nil
+ }
+ return dErr(orig)
+ }
+
+ var h, l uint64
+ var e int
+
+ var add, ovr uint32
+ var mul uint32 = 1
+ var dot = -1
+ var digits = 0
+ var i = 0
+ for i < len(s) {
+ c := s[i]
+ if mul == 1e9 {
+ h, l, ovr = muladd(h, l, mul, add)
+ mul, add = 1, 0
+ if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+ return dErr(orig)
+ }
+ }
+ if c >= '0' && c <= '9' {
+ i++
+ if c > '0' || digits > 0 {
+ digits++
+ }
+ if digits > 34 {
+ if c == '0' {
+ // Exact rounding.
+ e++
+ continue
+ }
+ return dErr(orig)
+ }
+ mul *= 10
+ add *= 10
+ add += uint32(c - '0')
+ continue
+ }
+ if c == '.' {
+ i++
+ if dot >= 0 || i == 1 && len(s) == 1 {
+ return dErr(orig)
+ }
+ if i == len(s) {
+ break
+ }
+ if s[i] < '0' || s[i] > '9' || e > 0 {
+ return dErr(orig)
+ }
+ dot = i
+ continue
+ }
+ break
+ }
+ if i == 0 {
+ return dErr(orig)
+ }
+ if mul > 1 {
+ h, l, ovr = muladd(h, l, mul, add)
+ if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+ return dErr(orig)
+ }
+ }
+ if dot >= 0 {
+ e += dot - i
+ }
+ if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
+ i++
+ eneg := s[i] == '-'
+ if eneg || s[i] == '+' {
+ i++
+ if i == len(s) {
+ return dErr(orig)
+ }
+ }
+ n := 0
+ for i < len(s) && n < 1e4 {
+ c := s[i]
+ i++
+ if c < '0' || c > '9' {
+ return dErr(orig)
+ }
+ n *= 10
+ n += int(c - '0')
+ }
+ if eneg {
+ n = -n
+ }
+ e += n
+ for e < -6176 {
+ // Subnormal.
+ var div uint32 = 1
+ for div < 1e9 && e < -6176 {
+ div *= 10
+ e++
+ }
+ var rem uint32
+ h, l, rem = divmod(h, l, div)
+ if rem > 0 {
+ return dErr(orig)
+ }
+ }
+ for e > 6111 {
+ // Clamped.
+ var mul uint32 = 1
+ for mul < 1e9 && e > 6111 {
+ mul *= 10
+ e--
+ }
+ h, l, ovr = muladd(h, l, mul, 0)
+ if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+ return dErr(orig)
+ }
+ }
+ if e < -6176 || e > 6111 {
+ return dErr(orig)
+ }
+ }
+
+ if i < len(s) {
+ return dErr(orig)
+ }
+
+ h |= uint64(e+6176) & uint64(1<<14-1) << 49
+ if neg {
+ h |= 1 << 63
+ }
+ return Decimal128{h, l}, nil
+}
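+
+// An illustrative round-trip sketch:
+//
+//	d, err := bson.ParseDecimal128("1.50E+3")
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(d.String()) // prints 1.50E+3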
+
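+// muladd computes (h, l)*mul + add over 32-bit limbs, returning the low 128
+// bits of the result and the overflow out of the most significant limb.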
+func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
+ mul64 := uint64(mul)
+ a := mul64 * (l & (1<<32 - 1))
+ b := a>>32 + mul64*(l>>32)
+ c := b>>32 + mul64*(h&(1<<32-1))
+ d := c>>32 + mul64*(h>>32)
+
+ a = a&(1<<32-1) + uint64(add)
+ b = b&(1<<32-1) + a>>32
+ c = c&(1<<32-1) + b>>32
+ d = d&(1<<32-1) + c>>32
+
+ return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
+}
diff --git a/vendor/gopkg.in/mgo.v2/bson/decode.go b/vendor/gopkg.in/mgo.v2/bson/decode.go
new file mode 100644
index 0000000..7c2d841
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/bson/decode.go
@@ -0,0 +1,849 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+ "fmt"
+ "math"
+ "net/url"
+ "reflect"
+ "strconv"
+ "sync"
+ "time"
+)
+
+type decoder struct {
+ in []byte
+ i int
+ docType reflect.Type
+}
+
+var typeM = reflect.TypeOf(M{})
+
+func newDecoder(in []byte) *decoder {
+ return &decoder{in, 0, typeM}
+}
+
+// --------------------------------------------------------------------------
+// Some helper functions.
+
+func corrupted() {
+ panic("Document is corrupted")
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of documents.
+
+const (
+ setterUnknown = iota
+ setterNone
+ setterType
+ setterAddr
+)
+
+var setterStyles map[reflect.Type]int
+var setterIface reflect.Type
+var setterMutex sync.RWMutex
+
+func init() {
+ var iface Setter
+ setterIface = reflect.TypeOf(&iface).Elem()
+ setterStyles = make(map[reflect.Type]int)
+}
+
+func setterStyle(outt reflect.Type) int {
+ setterMutex.RLock()
+ style := setterStyles[outt]
+ setterMutex.RUnlock()
+ if style == setterUnknown {
+ setterMutex.Lock()
+ defer setterMutex.Unlock()
+ if outt.Implements(setterIface) {
+ setterStyles[outt] = setterType
+ } else if reflect.PtrTo(outt).Implements(setterIface) {
+ setterStyles[outt] = setterAddr
+ } else {
+ setterStyles[outt] = setterNone
+ }
+ style = setterStyles[outt]
+ }
+ return style
+}
+
+func getSetter(outt reflect.Type, out reflect.Value) Setter {
+ style := setterStyle(outt)
+ if style == setterNone {
+ return nil
+ }
+ if style == setterAddr {
+ if !out.CanAddr() {
+ return nil
+ }
+ out = out.Addr()
+ } else if outt.Kind() == reflect.Ptr && out.IsNil() {
+ out.Set(reflect.New(outt.Elem()))
+ }
+ return out.Interface().(Setter)
+}
+
+func clearMap(m reflect.Value) {
+ var none reflect.Value
+ for _, k := range m.MapKeys() {
+ m.SetMapIndex(k, none)
+ }
+}
+
+func (d *decoder) readDocTo(out reflect.Value) {
+ var elemType reflect.Type
+ outt := out.Type()
+ outk := outt.Kind()
+
+ for {
+ if outk == reflect.Ptr && out.IsNil() {
+ out.Set(reflect.New(outt.Elem()))
+ }
+ if setter := getSetter(outt, out); setter != nil {
+ var raw Raw
+ d.readDocTo(reflect.ValueOf(&raw))
+ err := setter.SetBSON(raw)
+ if _, ok := err.(*TypeError); err != nil && !ok {
+ panic(err)
+ }
+ return
+ }
+ if outk == reflect.Ptr {
+ out = out.Elem()
+ outt = out.Type()
+ outk = out.Kind()
+ continue
+ }
+ break
+ }
+
+ var fieldsMap map[string]fieldInfo
+ var inlineMap reflect.Value
+ start := d.i
+
+ origout := out
+ if outk == reflect.Interface {
+ if d.docType.Kind() == reflect.Map {
+ mv := reflect.MakeMap(d.docType)
+ out.Set(mv)
+ out = mv
+ } else {
+ dv := reflect.New(d.docType).Elem()
+ out.Set(dv)
+ out = dv
+ }
+ outt = out.Type()
+ outk = outt.Kind()
+ }
+
+ docType := d.docType
+ keyType := typeString
+ convertKey := false
+ switch outk {
+ case reflect.Map:
+ keyType = outt.Key()
+ if keyType.Kind() != reflect.String {
+ panic("BSON map must have string keys. Got: " + outt.String())
+ }
+ if keyType != typeString {
+ convertKey = true
+ }
+ elemType = outt.Elem()
+ if elemType == typeIface {
+ d.docType = outt
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(out.Type()))
+ } else if out.Len() > 0 {
+ clearMap(out)
+ }
+ case reflect.Struct:
+ if outt != typeRaw {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ fieldsMap = sinfo.FieldsMap
+ out.Set(sinfo.Zero)
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ if !inlineMap.IsNil() && inlineMap.Len() > 0 {
+ clearMap(inlineMap)
+ }
+ elemType = inlineMap.Type().Elem()
+ if elemType == typeIface {
+ d.docType = inlineMap.Type()
+ }
+ }
+ }
+ case reflect.Slice:
+ switch outt.Elem() {
+ case typeDocElem:
+ origout.Set(d.readDocElems(outt))
+ return
+ case typeRawDocElem:
+ origout.Set(d.readRawDocElems(outt))
+ return
+ }
+ fallthrough
+ default:
+ panic("Unsupported document type for unmarshalling: " + out.Type().String())
+ }
+
+ end := int(d.readInt32())
+ end += d.i - 4
+ if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+ corrupted()
+ }
+ for d.in[d.i] != '\x00' {
+ kind := d.readByte()
+ name := d.readCStr()
+ if d.i >= end {
+ corrupted()
+ }
+
+ switch outk {
+ case reflect.Map:
+ e := reflect.New(elemType).Elem()
+ if d.readElemTo(e, kind) {
+ k := reflect.ValueOf(name)
+ if convertKey {
+ k = k.Convert(keyType)
+ }
+ out.SetMapIndex(k, e)
+ }
+ case reflect.Struct:
+ if outt == typeRaw {
+ d.dropElem(kind)
+ } else {
+ if info, ok := fieldsMap[name]; ok {
+ if info.Inline == nil {
+ d.readElemTo(out.Field(info.Num), kind)
+ } else {
+ d.readElemTo(out.FieldByIndex(info.Inline), kind)
+ }
+ } else if inlineMap.IsValid() {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ e := reflect.New(elemType).Elem()
+ if d.readElemTo(e, kind) {
+ inlineMap.SetMapIndex(reflect.ValueOf(name), e)
+ }
+ } else {
+ d.dropElem(kind)
+ }
+ }
+ case reflect.Slice:
+ }
+
+ if d.i >= end {
+ corrupted()
+ }
+ }
+ d.i++ // '\x00'
+ if d.i != end {
+ corrupted()
+ }
+ d.docType = docType
+
+ if outt == typeRaw {
+ out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
+ }
+}
+
+func (d *decoder) readArrayDocTo(out reflect.Value) {
+ end := int(d.readInt32())
+ end += d.i - 4
+ if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+ corrupted()
+ }
+ i := 0
+ l := out.Len()
+ for d.in[d.i] != '\x00' {
+ if i >= l {
+ panic("Length mismatch on array field")
+ }
+ kind := d.readByte()
+ for d.i < end && d.in[d.i] != '\x00' {
+ d.i++
+ }
+ if d.i >= end {
+ corrupted()
+ }
+ d.i++
+ d.readElemTo(out.Index(i), kind)
+ if d.i >= end {
+ corrupted()
+ }
+ i++
+ }
+ if i != l {
+ panic("Length mismatch on array field")
+ }
+ d.i++ // '\x00'
+ if d.i != end {
+ corrupted()
+ }
+}
+
+func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
+ tmp := make([]reflect.Value, 0, 8)
+ elemType := t.Elem()
+ if elemType == typeRawDocElem {
+ d.dropElem(0x04)
+ return reflect.Zero(t).Interface()
+ }
+
+ end := int(d.readInt32())
+ end += d.i - 4
+ if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+ corrupted()
+ }
+ for d.in[d.i] != '\x00' {
+ kind := d.readByte()
+ for d.i < end && d.in[d.i] != '\x00' {
+ d.i++
+ }
+ if d.i >= end {
+ corrupted()
+ }
+ d.i++
+ e := reflect.New(elemType).Elem()
+ if d.readElemTo(e, kind) {
+ tmp = append(tmp, e)
+ }
+ if d.i >= end {
+ corrupted()
+ }
+ }
+ d.i++ // '\x00'
+ if d.i != end {
+ corrupted()
+ }
+
+ n := len(tmp)
+ slice := reflect.MakeSlice(t, n, n)
+ for i := 0; i != n; i++ {
+ slice.Index(i).Set(tmp[i])
+ }
+ return slice.Interface()
+}
+
+var typeSlice = reflect.TypeOf([]interface{}{})
+var typeIface = typeSlice.Elem()
+
+func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
+ docType := d.docType
+ d.docType = typ
+ slice := make([]DocElem, 0, 8)
+ d.readDocWith(func(kind byte, name string) {
+ e := DocElem{Name: name}
+ v := reflect.ValueOf(&e.Value)
+ if d.readElemTo(v.Elem(), kind) {
+ slice = append(slice, e)
+ }
+ })
+ slicev := reflect.New(typ).Elem()
+ slicev.Set(reflect.ValueOf(slice))
+ d.docType = docType
+ return slicev
+}
+
+func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
+ docType := d.docType
+ d.docType = typ
+ slice := make([]RawDocElem, 0, 8)
+ d.readDocWith(func(kind byte, name string) {
+ e := RawDocElem{Name: name}
+ v := reflect.ValueOf(&e.Value)
+ if d.readElemTo(v.Elem(), kind) {
+ slice = append(slice, e)
+ }
+ })
+ slicev := reflect.New(typ).Elem()
+ slicev.Set(reflect.ValueOf(slice))
+ d.docType = docType
+ return slicev
+}
+
+func (d *decoder) readDocWith(f func(kind byte, name string)) {
+ end := int(d.readInt32())
+ end += d.i - 4
+ if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+ corrupted()
+ }
+ for d.in[d.i] != '\x00' {
+ kind := d.readByte()
+ name := d.readCStr()
+ if d.i >= end {
+ corrupted()
+ }
+ f(kind, name)
+ if d.i >= end {
+ corrupted()
+ }
+ }
+ d.i++ // '\x00'
+ if d.i != end {
+ corrupted()
+ }
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of individual elements within a document.
+
+var blackHole = settableValueOf(struct{}{})
+
+func (d *decoder) dropElem(kind byte) {
+ d.readElemTo(blackHole, kind)
+}
+
+// Attempt to decode an element from the document and put it into out.
+// If the types are not compatible, the returned ok value will be
+// false and out will be unchanged.
+func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
+
+ start := d.i
+
+ if kind == 0x03 {
+ // Delegate unmarshaling of documents.
+ outt := out.Type()
+ outk := out.Kind()
+ switch outk {
+ case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
+ d.readDocTo(out)
+ return true
+ }
+ if setterStyle(outt) != setterNone {
+ d.readDocTo(out)
+ return true
+ }
+ if outk == reflect.Slice {
+ switch outt.Elem() {
+ case typeDocElem:
+ out.Set(d.readDocElems(outt))
+ case typeRawDocElem:
+ out.Set(d.readRawDocElems(outt))
+ default:
+ d.readDocTo(blackHole)
+ }
+ return true
+ }
+ d.readDocTo(blackHole)
+ return true
+ }
+
+ var in interface{}
+
+ switch kind {
+ case 0x01: // Float64
+ in = d.readFloat64()
+ case 0x02: // UTF-8 string
+ in = d.readStr()
+ case 0x03: // Document
+ panic("Can't happen. Handled above.")
+ case 0x04: // Array
+ outt := out.Type()
+ if setterStyle(outt) != setterNone {
+ // Skip the value so its data is handed to the setter below.
+ d.dropElem(kind)
+ break
+ }
+ for outt.Kind() == reflect.Ptr {
+ outt = outt.Elem()
+ }
+ switch outt.Kind() {
+ case reflect.Array:
+ d.readArrayDocTo(out)
+ return true
+ case reflect.Slice:
+ in = d.readSliceDoc(outt)
+ default:
+ in = d.readSliceDoc(typeSlice)
+ }
+ case 0x05: // Binary
+ b := d.readBinary()
+ if b.Kind == 0x00 || b.Kind == 0x02 {
+ in = b.Data
+ } else {
+ in = b
+ }
+ case 0x06: // Undefined (obsolete, but still seen in the wild)
+ in = Undefined
+ case 0x07: // ObjectId
+ in = ObjectId(d.readBytes(12))
+ case 0x08: // Bool
+ in = d.readBool()
+ case 0x09: // Timestamp
+ // MongoDB handles timestamps as milliseconds.
+ i := d.readInt64()
+ if i == -62135596800000 {
+ in = time.Time{} // In UTC for convenience.
+ } else {
+ in = time.Unix(i/1e3, i%1e3*1e6)
+ }
+ case 0x0A: // Nil
+ in = nil
+ case 0x0B: // RegEx
+ in = d.readRegEx()
+ case 0x0C:
+ in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
+ case 0x0D: // JavaScript without scope
+ in = JavaScript{Code: d.readStr()}
+ case 0x0E: // Symbol
+ in = Symbol(d.readStr())
+ case 0x0F: // JavaScript with scope
+ d.i += 4 // Skip length
+ js := JavaScript{d.readStr(), make(M)}
+ d.readDocTo(reflect.ValueOf(js.Scope))
+ in = js
+ case 0x10: // Int32
+ in = int(d.readInt32())
+ case 0x11: // Mongo-specific timestamp
+ in = MongoTimestamp(d.readInt64())
+ case 0x12: // Int64
+ in = d.readInt64()
+ case 0x13: // Decimal128
+ in = Decimal128{
+ l: uint64(d.readInt64()),
+ h: uint64(d.readInt64()),
+ }
+ case 0x7F: // Max key
+ in = MaxKey
+ case 0xFF: // Min key
+ in = MinKey
+ default:
+ panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
+ }
+
+ outt := out.Type()
+
+ if outt == typeRaw {
+ out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
+ return true
+ }
+
+ if setter := getSetter(outt, out); setter != nil {
+ err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
+ if err == SetZero {
+ out.Set(reflect.Zero(outt))
+ return true
+ }
+ if err == nil {
+ return true
+ }
+ if _, ok := err.(*TypeError); !ok {
+ panic(err)
+ }
+ return false
+ }
+
+ if in == nil {
+ out.Set(reflect.Zero(outt))
+ return true
+ }
+
+ outk := outt.Kind()
+
+ // Dereference and initialize pointer if necessary.
+ first := true
+ for outk == reflect.Ptr {
+ if !out.IsNil() {
+ out = out.Elem()
+ } else {
+ elem := reflect.New(outt.Elem())
+ if first {
+ // Only set if value is compatible.
+ first = false
+ defer func(out, elem reflect.Value) {
+ if good {
+ out.Set(elem)
+ }
+ }(out, elem)
+ } else {
+ out.Set(elem)
+ }
+ out = elem
+ }
+ outt = out.Type()
+ outk = outt.Kind()
+ }
+
+ inv := reflect.ValueOf(in)
+ if outt == inv.Type() {
+ out.Set(inv)
+ return true
+ }
+
+ switch outk {
+ case reflect.Interface:
+ out.Set(inv)
+ return true
+ case reflect.String:
+ switch inv.Kind() {
+ case reflect.String:
+ out.SetString(inv.String())
+ return true
+ case reflect.Slice:
+ if b, ok := in.([]byte); ok {
+ out.SetString(string(b))
+ return true
+ }
+ case reflect.Int, reflect.Int64:
+ if outt == typeJSONNumber {
+ out.SetString(strconv.FormatInt(inv.Int(), 10))
+ return true
+ }
+ case reflect.Float64:
+ if outt == typeJSONNumber {
+ out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
+ return true
+ }
+ }
+ case reflect.Slice, reflect.Array:
+ // Remember, array (0x04) slices are built with the correct
+ // element type. If we are here, it must be a cross BSON kind
+ // conversion (e.g. 0x05 unmarshalling on a string).
+ if outt.Elem().Kind() != reflect.Uint8 {
+ break
+ }
+ switch inv.Kind() {
+ case reflect.String:
+ slice := []byte(inv.String())
+ out.Set(reflect.ValueOf(slice))
+ return true
+ case reflect.Slice:
+ switch outt.Kind() {
+ case reflect.Array:
+ reflect.Copy(out, inv)
+ case reflect.Slice:
+ out.SetBytes(inv.Bytes())
+ }
+ return true
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch inv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ out.SetInt(inv.Int())
+ return true
+ case reflect.Float32, reflect.Float64:
+ out.SetInt(int64(inv.Float()))
+ return true
+ case reflect.Bool:
+ if inv.Bool() {
+ out.SetInt(1)
+ } else {
+ out.SetInt(0)
+ }
+ return true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ panic("can't happen: no uint types in BSON (!?)")
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch inv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ out.SetUint(uint64(inv.Int()))
+ return true
+ case reflect.Float32, reflect.Float64:
+ out.SetUint(uint64(inv.Float()))
+ return true
+ case reflect.Bool:
+ if inv.Bool() {
+ out.SetUint(1)
+ } else {
+ out.SetUint(0)
+ }
+ return true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ panic("Can't happen. No uint types in BSON.")
+ }
+ case reflect.Float32, reflect.Float64:
+ switch inv.Kind() {
+ case reflect.Float32, reflect.Float64:
+ out.SetFloat(inv.Float())
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ out.SetFloat(float64(inv.Int()))
+ return true
+ case reflect.Bool:
+ if inv.Bool() {
+ out.SetFloat(1)
+ } else {
+ out.SetFloat(0)
+ }
+ return true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ panic("Can't happen. No uint types in BSON?")
+ }
+ case reflect.Bool:
+ switch inv.Kind() {
+ case reflect.Bool:
+ out.SetBool(inv.Bool())
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ out.SetBool(inv.Int() != 0)
+ return true
+ case reflect.Float32, reflect.Float64:
+ out.SetBool(inv.Float() != 0)
+ return true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ panic("Can't happen. No uint types in BSON?")
+ }
+ case reflect.Struct:
+ if outt == typeURL && inv.Kind() == reflect.String {
+ u, err := url.Parse(inv.String())
+ if err != nil {
+ panic(err)
+ }
+ out.Set(reflect.ValueOf(u).Elem())
+ return true
+ }
+ if outt == typeBinary {
+ if b, ok := in.([]byte); ok {
+ out.Set(reflect.ValueOf(Binary{Data: b}))
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// --------------------------------------------------------------------------
+// Parsers of basic types.
+
+func (d *decoder) readRegEx() RegEx {
+ re := RegEx{}
+ re.Pattern = d.readCStr()
+ re.Options = d.readCStr()
+ return re
+}
+
+func (d *decoder) readBinary() Binary {
+ l := d.readInt32()
+ b := Binary{}
+ b.Kind = d.readByte()
+ b.Data = d.readBytes(l)
+ if b.Kind == 0x02 && len(b.Data) >= 4 {
+ // Weird obsolete format with redundant length.
+ b.Data = b.Data[4:]
+ }
+ return b
+}
+
+func (d *decoder) readStr() string {
+ l := d.readInt32()
+ b := d.readBytes(l - 1)
+ if d.readByte() != '\x00' {
+ corrupted()
+ }
+ return string(b)
+}
+
+func (d *decoder) readCStr() string {
+ start := d.i
+ end := start
+ l := len(d.in)
+ for ; end != l; end++ {
+ if d.in[end] == '\x00' {
+ break
+ }
+ }
+ d.i = end + 1
+ if d.i > l {
+ corrupted()
+ }
+ return string(d.in[start:end])
+}
+
+func (d *decoder) readBool() bool {
+ b := d.readByte()
+ if b == 0 {
+ return false
+ }
+ if b == 1 {
+ return true
+ }
+ panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
+}
+
+func (d *decoder) readFloat64() float64 {
+ return math.Float64frombits(uint64(d.readInt64()))
+}
+
+func (d *decoder) readInt32() int32 {
+ b := d.readBytes(4)
+ return int32((uint32(b[0]) << 0) |
+ (uint32(b[1]) << 8) |
+ (uint32(b[2]) << 16) |
+ (uint32(b[3]) << 24))
+}
+
+func (d *decoder) readInt64() int64 {
+ b := d.readBytes(8)
+ return int64((uint64(b[0]) << 0) |
+ (uint64(b[1]) << 8) |
+ (uint64(b[2]) << 16) |
+ (uint64(b[3]) << 24) |
+ (uint64(b[4]) << 32) |
+ (uint64(b[5]) << 40) |
+ (uint64(b[6]) << 48) |
+ (uint64(b[7]) << 56))
+}
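+
+// Illustrative note (not part of the original source): the readers above
+// decode little-endian values, so the byte sequence {0x2A, 0x00, 0x00, 0x00}
+// yields int32(42), while {0xFF, 0xFF, 0xFF, 0xFF} yields int32(-1).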
+
+func (d *decoder) readByte() byte {
+ i := d.i
+ d.i++
+ if d.i > len(d.in) {
+ corrupted()
+ }
+ return d.in[i]
+}
+
+func (d *decoder) readBytes(length int32) []byte {
+ if length < 0 {
+ corrupted()
+ }
+ start := d.i
+ d.i += int(length)
+ if d.i < start || d.i > len(d.in) {
+ corrupted()
+ }
+ return d.in[start : start+int(length)]
+}
diff --git a/vendor/gopkg.in/mgo.v2/bson/encode.go b/vendor/gopkg.in/mgo.v2/bson/encode.go
new file mode 100644
index 0000000..add39e8
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/bson/encode.go
@@ -0,0 +1,514 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "net/url"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// --------------------------------------------------------------------------
+// Some internal infrastructure.
+
+var (
+ typeBinary = reflect.TypeOf(Binary{})
+ typeObjectId = reflect.TypeOf(ObjectId(""))
+ typeDBPointer = reflect.TypeOf(DBPointer{"", ObjectId("")})
+ typeSymbol = reflect.TypeOf(Symbol(""))
+ typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
+ typeOrderKey = reflect.TypeOf(MinKey)
+ typeDocElem = reflect.TypeOf(DocElem{})
+ typeRawDocElem = reflect.TypeOf(RawDocElem{})
+ typeRaw = reflect.TypeOf(Raw{})
+ typeURL = reflect.TypeOf(url.URL{})
+ typeTime = reflect.TypeOf(time.Time{})
+ typeString = reflect.TypeOf("")
+ typeJSONNumber = reflect.TypeOf(json.Number(""))
+)
+
+const itoaCacheSize = 32
+
+var itoaCache []string
+
+func init() {
+ itoaCache = make([]string, itoaCacheSize)
+ for i := 0; i != itoaCacheSize; i++ {
+ itoaCache[i] = strconv.Itoa(i)
+ }
+}
+
+func itoa(i int) string {
+ if i < itoaCacheSize {
+ return itoaCache[i]
+ }
+ return strconv.Itoa(i)
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of the document value itself.
+
+type encoder struct {
+ out []byte
+}
+
+func (e *encoder) addDoc(v reflect.Value) {
+ for {
+ if vi, ok := v.Interface().(Getter); ok {
+ getv, err := vi.GetBSON()
+ if err != nil {
+ panic(err)
+ }
+ v = reflect.ValueOf(getv)
+ continue
+ }
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+
+ if v.Type() == typeRaw {
+ raw := v.Interface().(Raw)
+ if raw.Kind != 0x03 && raw.Kind != 0x00 {
+ panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
+ }
+ if len(raw.Data) == 0 {
+ panic("Attempted to marshal empty Raw document")
+ }
+ e.addBytes(raw.Data...)
+ return
+ }
+
+ start := e.reserveInt32()
+
+ switch v.Kind() {
+ case reflect.Map:
+ e.addMap(v)
+ case reflect.Struct:
+ e.addStruct(v)
+ case reflect.Array, reflect.Slice:
+ e.addSlice(v)
+ default:
+ panic("Can't marshal " + v.Type().String() + " as a BSON document")
+ }
+
+ e.addBytes(0)
+ e.setInt32(start, int32(len(e.out)-start))
+}
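+
+// Worked example (illustrative, not part of the original source): an empty
+// document reserves 4 bytes via reserveInt32, appends the terminating 0x00,
+// and back-patches the length, producing the 5-byte sequence
+// {0x05, 0x00, 0x00, 0x00, 0x00}.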
+
+func (e *encoder) addMap(v reflect.Value) {
+ for _, k := range v.MapKeys() {
+ e.addElem(k.String(), v.MapIndex(k), false)
+ }
+}
+
+func (e *encoder) addStruct(v reflect.Value) {
+ sinfo, err := getStructInfo(v.Type())
+ if err != nil {
+ panic(err)
+ }
+ var value reflect.Value
+ if sinfo.InlineMap >= 0 {
+ m := v.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ for _, k := range m.MapKeys() {
+ ks := k.String()
+ if _, found := sinfo.FieldsMap[ks]; found {
+ panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
+ }
+ e.addElem(ks, m.MapIndex(k), false)
+ }
+ }
+ }
+ for _, info := range sinfo.FieldsList {
+ if info.Inline == nil {
+ value = v.Field(info.Num)
+ } else {
+ value = v.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.addElem(info.Key, value, info.MinSize)
+ }
+}
+
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Ptr, reflect.Interface:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ if vt == typeTime {
+ return v.Interface().(time.Time).IsZero()
+ }
+ for i := 0; i < v.NumField(); i++ {
+ if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
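+
+// Note (illustrative): isZero drives the ",omitempty" handling in addStruct
+// above. With a hypothetical struct such as
+//
+//     type T struct {
+//         Name string    `bson:"name,omitempty"`
+//         When time.Time `bson:"when,omitempty"`
+//     }
+//
+// an empty Name or a zero time.Time is simply left out of the marshalled
+// document.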
+
+func (e *encoder) addSlice(v reflect.Value) {
+ vi := v.Interface()
+ if d, ok := vi.(D); ok {
+ for _, elem := range d {
+ e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+ }
+ return
+ }
+ if d, ok := vi.(RawD); ok {
+ for _, elem := range d {
+ e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+ }
+ return
+ }
+ l := v.Len()
+ et := v.Type().Elem()
+ if et == typeDocElem {
+ for i := 0; i < l; i++ {
+ elem := v.Index(i).Interface().(DocElem)
+ e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+ }
+ return
+ }
+ if et == typeRawDocElem {
+ for i := 0; i < l; i++ {
+ elem := v.Index(i).Interface().(RawDocElem)
+ e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+ }
+ return
+ }
+ for i := 0; i < l; i++ {
+ e.addElem(itoa(i), v.Index(i), false)
+ }
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of elements in a document.
+
+func (e *encoder) addElemName(kind byte, name string) {
+ e.addBytes(kind)
+ e.addBytes([]byte(name)...)
+ e.addBytes(0)
+}
+
+func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
+
+ if !v.IsValid() {
+ e.addElemName(0x0A, name)
+ return
+ }
+
+ if getter, ok := v.Interface().(Getter); ok {
+ getv, err := getter.GetBSON()
+ if err != nil {
+ panic(err)
+ }
+ e.addElem(name, reflect.ValueOf(getv), minSize)
+ return
+ }
+
+ switch v.Kind() {
+
+ case reflect.Interface:
+ e.addElem(name, v.Elem(), minSize)
+
+ case reflect.Ptr:
+ e.addElem(name, v.Elem(), minSize)
+
+ case reflect.String:
+ s := v.String()
+ switch v.Type() {
+ case typeObjectId:
+ if len(s) != 12 {
+ panic("ObjectIDs must be exactly 12 bytes long (got " +
+ strconv.Itoa(len(s)) + ")")
+ }
+ e.addElemName(0x07, name)
+ e.addBytes([]byte(s)...)
+ case typeSymbol:
+ e.addElemName(0x0E, name)
+ e.addStr(s)
+ case typeJSONNumber:
+ n := v.Interface().(json.Number)
+ if i, err := n.Int64(); err == nil {
+ e.addElemName(0x12, name)
+ e.addInt64(i)
+ } else if f, err := n.Float64(); err == nil {
+ e.addElemName(0x01, name)
+ e.addFloat64(f)
+ } else {
+ panic("failed to convert json.Number to a number: " + s)
+ }
+ default:
+ e.addElemName(0x02, name)
+ e.addStr(s)
+ }
+
+ case reflect.Float32, reflect.Float64:
+ e.addElemName(0x01, name)
+ e.addFloat64(v.Float())
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ u := v.Uint()
+ if int64(u) < 0 {
+ panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
+ } else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
+ e.addElemName(0x10, name)
+ e.addInt32(int32(u))
+ } else {
+ e.addElemName(0x12, name)
+ e.addInt64(int64(u))
+ }
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch v.Type() {
+ case typeMongoTimestamp:
+ e.addElemName(0x11, name)
+ e.addInt64(v.Int())
+
+ case typeOrderKey:
+ if v.Int() == int64(MaxKey) {
+ e.addElemName(0x7F, name)
+ } else {
+ e.addElemName(0xFF, name)
+ }
+
+ default:
+ i := v.Int()
+ if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
+ // It fits into an int32, encode as such.
+ e.addElemName(0x10, name)
+ e.addInt32(int32(i))
+ } else {
+ e.addElemName(0x12, name)
+ e.addInt64(i)
+ }
+ }
+
+ case reflect.Bool:
+ e.addElemName(0x08, name)
+ if v.Bool() {
+ e.addBytes(1)
+ } else {
+ e.addBytes(0)
+ }
+
+ case reflect.Map:
+ e.addElemName(0x03, name)
+ e.addDoc(v)
+
+ case reflect.Slice:
+ vt := v.Type()
+ et := vt.Elem()
+ if et.Kind() == reflect.Uint8 {
+ e.addElemName(0x05, name)
+ e.addBinary(0x00, v.Bytes())
+ } else if et == typeDocElem || et == typeRawDocElem {
+ e.addElemName(0x03, name)
+ e.addDoc(v)
+ } else {
+ e.addElemName(0x04, name)
+ e.addDoc(v)
+ }
+
+ case reflect.Array:
+ et := v.Type().Elem()
+ if et.Kind() == reflect.Uint8 {
+ e.addElemName(0x05, name)
+ if v.CanAddr() {
+ e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte))
+ } else {
+ n := v.Len()
+ e.addInt32(int32(n))
+ e.addBytes(0x00)
+ for i := 0; i < n; i++ {
+ el := v.Index(i)
+ e.addBytes(byte(el.Uint()))
+ }
+ }
+ } else {
+ e.addElemName(0x04, name)
+ e.addDoc(v)
+ }
+
+ case reflect.Struct:
+ switch s := v.Interface().(type) {
+
+ case Raw:
+ kind := s.Kind
+ if kind == 0x00 {
+ kind = 0x03
+ }
+ if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
+ panic("Attempted to marshal empty Raw document")
+ }
+ e.addElemName(kind, name)
+ e.addBytes(s.Data...)
+
+ case Binary:
+ e.addElemName(0x05, name)
+ e.addBinary(s.Kind, s.Data)
+
+ case Decimal128:
+ e.addElemName(0x13, name)
+ e.addInt64(int64(s.l))
+ e.addInt64(int64(s.h))
+
+ case DBPointer:
+ e.addElemName(0x0C, name)
+ e.addStr(s.Namespace)
+ if len(s.Id) != 12 {
+ panic("ObjectIDs must be exactly 12 bytes long (got " +
+ strconv.Itoa(len(s.Id)) + ")")
+ }
+ e.addBytes([]byte(s.Id)...)
+
+ case RegEx:
+ e.addElemName(0x0B, name)
+ e.addCStr(s.Pattern)
+ e.addCStr(s.Options)
+
+ case JavaScript:
+ if s.Scope == nil {
+ e.addElemName(0x0D, name)
+ e.addStr(s.Code)
+ } else {
+ e.addElemName(0x0F, name)
+ start := e.reserveInt32()
+ e.addStr(s.Code)
+ e.addDoc(reflect.ValueOf(s.Scope))
+ e.setInt32(start, int32(len(e.out)-start))
+ }
+
+ case time.Time:
+ // MongoDB stores dates as milliseconds since the Unix epoch.
+ e.addElemName(0x09, name)
+ e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
+
+ case url.URL:
+ e.addElemName(0x02, name)
+ e.addStr(s.String())
+
+ case undefined:
+ e.addElemName(0x06, name)
+
+ default:
+ e.addElemName(0x03, name)
+ e.addDoc(v)
+ }
+
+ default:
+ panic("Can't marshal " + v.Type().String() + " in a BSON document")
+ }
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of base types.
+
+func (e *encoder) addBinary(subtype byte, v []byte) {
+ if subtype == 0x02 {
+ // Wonder how that brilliant idea came to life. Obsolete, luckily.
+ e.addInt32(int32(len(v) + 4))
+ e.addBytes(subtype)
+ e.addInt32(int32(len(v)))
+ } else {
+ e.addInt32(int32(len(v)))
+ e.addBytes(subtype)
+ }
+ e.addBytes(v...)
+}
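+
+// Layout note (illustrative): for the obsolete 0x02 subtype the data is
+// length-prefixed twice (an outer int32 of len(v)+4, the subtype byte, and
+// an inner int32 of len(v)), while all other subtypes are written as a
+// single int32 length, the subtype byte, and the data.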
+
+func (e *encoder) addStr(v string) {
+ e.addInt32(int32(len(v) + 1))
+ e.addCStr(v)
+}
+
+func (e *encoder) addCStr(v string) {
+ e.addBytes([]byte(v)...)
+ e.addBytes(0)
+}
+
+func (e *encoder) reserveInt32() (pos int) {
+ pos = len(e.out)
+ e.addBytes(0, 0, 0, 0)
+ return pos
+}
+
+func (e *encoder) setInt32(pos int, v int32) {
+ e.out[pos+0] = byte(v)
+ e.out[pos+1] = byte(v >> 8)
+ e.out[pos+2] = byte(v >> 16)
+ e.out[pos+3] = byte(v >> 24)
+}
+
+func (e *encoder) addInt32(v int32) {
+ u := uint32(v)
+ e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
+}
+
+func (e *encoder) addInt64(v int64) {
+ u := uint64(v)
+ e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
+ byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
+}
+
+func (e *encoder) addFloat64(v float64) {
+ e.addInt64(int64(math.Float64bits(v)))
+}
+
+func (e *encoder) addBytes(v ...byte) {
+ e.out = append(e.out, v...)
+}
diff --git a/vendor/gopkg.in/mgo.v2/bson/json.go b/vendor/gopkg.in/mgo.v2/bson/json.go
new file mode 100644
index 0000000..09df826
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/bson/json.go
@@ -0,0 +1,380 @@
+package bson
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "gopkg.in/mgo.v2/internal/json"
+ "strconv"
+ "time"
+)
+
+// UnmarshalJSON unmarshals a JSON value that may hold non-standard
+// syntax as defined in BSON's extended JSON specification.
+func UnmarshalJSON(data []byte, value interface{}) error {
+ d := json.NewDecoder(bytes.NewBuffer(data))
+ d.Extend(&jsonExt)
+ return d.Decode(value)
+}
+
+// MarshalJSON marshals a JSON value that may hold non-standard
+// syntax as defined in BSON's extended JSON specification.
+func MarshalJSON(value interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ e := json.NewEncoder(&buf)
+ e.Extend(&jsonExt)
+ err := e.Encode(value)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
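+
+// A rough usage sketch from client code (illustrative only; the exact output
+// depends on the extensions registered below):
+//
+//     data, err := bson.MarshalJSON(bson.M{"_id": bson.ObjectIdHex("5a9427648b0beebeb69579e7")})
+//     // data now holds extended JSON such as {"_id":{"$oid":"5a9427648b0beebeb69579e7"}}
+//
+//     var doc bson.M
+//     err = bson.UnmarshalJSON(data, &doc)
+//     // doc["_id"] is an ObjectId again.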
+
+// jdec is used internally by the JSON decoding functions so they may
+// unmarshal function-call syntax (such as ObjectId(...)) without getting
+// into endless recursion due to keyed objects.
+func jdec(data []byte, value interface{}) error {
+ d := json.NewDecoder(bytes.NewBuffer(data))
+ d.Extend(&funcExt)
+ return d.Decode(value)
+}
+
+var jsonExt json.Extension
+var funcExt json.Extension
+
+// TODO
+// - Shell regular expressions ("/regexp/opts")
+
+func init() {
+ jsonExt.DecodeUnquotedKeys(true)
+ jsonExt.DecodeTrailingCommas(true)
+
+ funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary")
+ jsonExt.DecodeKeyed("$binary", jdecBinary)
+ jsonExt.DecodeKeyed("$binaryFunc", jdecBinary)
+ jsonExt.EncodeType([]byte(nil), jencBinarySlice)
+ jsonExt.EncodeType(Binary{}, jencBinaryType)
+
+ funcExt.DecodeFunc("ISODate", "$dateFunc", "S")
+ funcExt.DecodeFunc("new Date", "$dateFunc", "S")
+ jsonExt.DecodeKeyed("$date", jdecDate)
+ jsonExt.DecodeKeyed("$dateFunc", jdecDate)
+ jsonExt.EncodeType(time.Time{}, jencDate)
+
+ funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i")
+ jsonExt.DecodeKeyed("$timestamp", jdecTimestamp)
+ jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp)
+
+ funcExt.DecodeConst("undefined", Undefined)
+
+ jsonExt.DecodeKeyed("$regex", jdecRegEx)
+ jsonExt.EncodeType(RegEx{}, jencRegEx)
+
+ funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id")
+ jsonExt.DecodeKeyed("$oid", jdecObjectId)
+ jsonExt.DecodeKeyed("$oidFunc", jdecObjectId)
+ jsonExt.EncodeType(ObjectId(""), jencObjectId)
+
+ funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id")
+ jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef)
+
+ funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N")
+ jsonExt.DecodeKeyed("$numberLong", jdecNumberLong)
+ jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong)
+ jsonExt.EncodeType(int64(0), jencNumberLong)
+ jsonExt.EncodeType(int(0), jencInt)
+
+ funcExt.DecodeConst("MinKey", MinKey)
+ funcExt.DecodeConst("MaxKey", MaxKey)
+ jsonExt.DecodeKeyed("$minKey", jdecMinKey)
+ jsonExt.DecodeKeyed("$maxKey", jdecMaxKey)
+ jsonExt.EncodeType(orderKey(0), jencMinMaxKey)
+
+ jsonExt.DecodeKeyed("$undefined", jdecUndefined)
+ jsonExt.EncodeType(Undefined, jencUndefined)
+
+ jsonExt.Extend(&funcExt)
+}
+
+func fbytes(format string, args ...interface{}) []byte {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, format, args...)
+ return buf.Bytes()
+}
+
+func jdecBinary(data []byte) (interface{}, error) {
+ var v struct {
+ Binary []byte `json:"$binary"`
+ Type string `json:"$type"`
+ Func struct {
+ Binary []byte `json:"$binary"`
+ Type int64 `json:"$type"`
+ } `json:"$binaryFunc"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+
+ var binData []byte
+ var binKind int64
+ if v.Type == "" && v.Binary == nil {
+ binData = v.Func.Binary
+ binKind = v.Func.Type
+ } else if v.Type == "" {
+ return v.Binary, nil
+ } else {
+ binData = v.Binary
+ binKind, err = strconv.ParseInt(v.Type, 0, 64)
+ if err != nil {
+ binKind = -1
+ }
+ }
+
+ if binKind == 0 {
+ return binData, nil
+ }
+ if binKind < 0 || binKind > 255 {
+ return nil, fmt.Errorf("invalid type in binary object: %s", data)
+ }
+
+ return Binary{Kind: byte(binKind), Data: binData}, nil
+}
+
+func jencBinarySlice(v interface{}) ([]byte, error) {
+ in := v.([]byte)
+ out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
+ base64.StdEncoding.Encode(out, in)
+ return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil
+}
+
+func jencBinaryType(v interface{}) ([]byte, error) {
+ in := v.(Binary)
+ out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data)))
+ base64.StdEncoding.Encode(out, in.Data)
+ return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil
+}
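+
+// For illustration (not part of the original source): a value such as
+// Binary{Kind: 0x00, Data: []byte("hello")} is rendered by jencBinaryType as
+//
+//     {"$binary":"aGVsbG8=","$type":"0x0"}
+//
+// and jdecBinary turns that document back into a plain []byte, since kind
+// 0x00 denotes the generic binary subtype.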
+
+const jdateFormat = "2006-01-02T15:04:05.999Z"
+
+func jdecDate(data []byte) (interface{}, error) {
+ var v struct {
+ S string `json:"$date"`
+ Func struct {
+ S string
+ } `json:"$dateFunc"`
+ }
+ _ = jdec(data, &v)
+ if v.S == "" {
+ v.S = v.Func.S
+ }
+ if v.S != "" {
+ for _, format := range []string{jdateFormat, "2006-01-02"} {
+ t, err := time.Parse(format, v.S)
+ if err == nil {
+ return t, nil
+ }
+ }
+ return nil, fmt.Errorf("cannot parse date: %q", v.S)
+ }
+
+ var vn struct {
+ Date struct {
+ N int64 `json:"$numberLong,string"`
+ } `json:"$date"`
+ Func struct {
+ S int64
+ } `json:"$dateFunc"`
+ }
+ err := jdec(data, &vn)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse date: %q", data)
+ }
+ n := vn.Date.N
+ if n == 0 {
+ n = vn.Func.S
+ }
+ return time.Unix(n/1000, n%1000*1e6).UTC(), nil
+}
+
+func jencDate(v interface{}) ([]byte, error) {
+ t := v.(time.Time)
+ return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil
+}
+
+func jdecTimestamp(data []byte) (interface{}, error) {
+ var v struct {
+ Func struct {
+ T int32 `json:"t"`
+ I int32 `json:"i"`
+ } `json:"$timestamp"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+ return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil
+}
+
+func jencTimestamp(v interface{}) ([]byte, error) {
+ ts := uint64(v.(MongoTimestamp))
+ return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil
+}
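+
+// Worked example (illustrative): a MongoTimestamp packs the seconds value in
+// the high 32 bits and the ordinal in the low 32 bits, so t=5, i=1 is
+// MongoTimestamp(5<<32 | 1) == MongoTimestamp(21474836481), which
+// jencTimestamp renders as {"$timestamp":{"t":5,"i":1}}.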
+
+func jdecRegEx(data []byte) (interface{}, error) {
+ var v struct {
+ Regex string `json:"$regex"`
+ Options string `json:"$options"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+ return RegEx{v.Regex, v.Options}, nil
+}
+
+func jencRegEx(v interface{}) ([]byte, error) {
+ re := v.(RegEx)
+ type regex struct {
+ Regex string `json:"$regex"`
+ Options string `json:"$options"`
+ }
+ return json.Marshal(regex{re.Pattern, re.Options})
+}
+
+func jdecObjectId(data []byte) (interface{}, error) {
+ var v struct {
+ Id string `json:"$oid"`
+ Func struct {
+ Id string
+ } `json:"$oidFunc"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+ if v.Id == "" {
+ v.Id = v.Func.Id
+ }
+ return ObjectIdHex(v.Id), nil
+}
+
+func jencObjectId(v interface{}) ([]byte, error) {
+ return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil
+}
+
+func jdecDBRef(data []byte) (interface{}, error) {
+ // TODO Support unmarshaling $ref and $id into the input value.
+ var v struct {
+ Obj map[string]interface{} `json:"$dbrefFunc"`
+ }
+ // TODO Fix this. Must not be required.
+ v.Obj = make(map[string]interface{})
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+ return v.Obj, nil
+}
+
+func jdecNumberLong(data []byte) (interface{}, error) {
+ var v struct {
+ N int64 `json:"$numberLong,string"`
+ Func struct {
+ N int64 `json:",string"`
+ } `json:"$numberLongFunc"`
+ }
+ var vn struct {
+ N int64 `json:"$numberLong"`
+ Func struct {
+ N int64
+ } `json:"$numberLongFunc"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ err = jdec(data, &vn)
+ v.N = vn.N
+ v.Func.N = vn.Func.N
+ }
+ if err != nil {
+ return nil, err
+ }
+ if v.N != 0 {
+ return v.N, nil
+ }
+ return v.Func.N, nil
+}
+
+func jencNumberLong(v interface{}) ([]byte, error) {
+ n := v.(int64)
+ f := `{"$numberLong":"%d"}`
+ if n <= 1<<53 {
+ f = `{"$numberLong":%d}`
+ }
+ return fbytes(f, n), nil
+}
+
+func jencInt(v interface{}) ([]byte, error) {
+ n := v.(int)
+ f := `{"$numberLong":"%d"}`
+ if int64(n) <= 1<<53 {
+ f = `%d`
+ }
+ return fbytes(f, n), nil
+}
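+
+// Illustrative examples for the two encoders above: an int64 of 42 fits the
+// double-safe range and is emitted as {"$numberLong":42}, while 1<<60
+// exceeds 2^53 and is quoted as {"$numberLong":"1152921504606846976"};
+// a plain int of 42 is emitted as the bare literal 42.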
+
+func jdecMinKey(data []byte) (interface{}, error) {
+ var v struct {
+ N int64 `json:"$minKey"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+ if v.N != 1 {
+ return nil, fmt.Errorf("invalid $minKey object: %s", data)
+ }
+ return MinKey, nil
+}
+
+func jdecMaxKey(data []byte) (interface{}, error) {
+ var v struct {
+ N int64 `json:"$maxKey"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+ if v.N != 1 {
+ return nil, fmt.Errorf("invalid $maxKey object: %s", data)
+ }
+ return MaxKey, nil
+}
+
+func jencMinMaxKey(v interface{}) ([]byte, error) {
+ switch v.(orderKey) {
+ case MinKey:
+ return []byte(`{"$minKey":1}`), nil
+ case MaxKey:
+ return []byte(`{"$maxKey":1}`), nil
+ }
+ panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v))
+}
+
+func jdecUndefined(data []byte) (interface{}, error) {
+ var v struct {
+ B bool `json:"$undefined"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+ if !v.B {
+ return nil, fmt.Errorf("invalid $undefined object: %s", data)
+ }
+ return Undefined, nil
+}
+
+func jencUndefined(v interface{}) ([]byte, error) {
+ return []byte(`{"$undefined":true}`), nil
+}
diff --git a/vendor/gopkg.in/mgo.v2/bulk.go b/vendor/gopkg.in/mgo.v2/bulk.go
new file mode 100644
index 0000000..072a520
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/bulk.go
@@ -0,0 +1,351 @@
+package mgo
+
+import (
+ "bytes"
+ "sort"
+
+ "gopkg.in/mgo.v2/bson"
+)
+
+// Bulk represents an operation that can be prepared with several
+// orthogonal changes before being delivered to the server.
+//
+// MongoDB servers older than version 2.6 do not have proper support for bulk
+// operations, so the driver attempts to map its API as much as possible into
+// the functionality that works. In particular, in those releases updates and
+// removals are sent individually, and inserts are sent in bulk but have
+// suboptimal error reporting compared to more recent versions of the server.
+// See the documentation of BulkErrorCase for details on that.
+//
+// Relevant documentation:
+//
+// http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
+//
+type Bulk struct {
+ c *Collection
+ opcount int
+ actions []bulkAction
+ ordered bool
+}
+
+type bulkOp int
+
+const (
+ bulkInsert bulkOp = iota + 1
+ bulkUpdate
+ bulkUpdateAll
+ bulkRemove
+)
+
+type bulkAction struct {
+ op bulkOp
+ docs []interface{}
+ idxs []int
+}
+
+type bulkUpdateOp []interface{}
+type bulkDeleteOp []interface{}
+
+// BulkResult holds the results for a bulk operation.
+type BulkResult struct {
+ Matched int
+ Modified int // Available only for MongoDB 2.6+
+
+ // Be conservative until we understand exactly how to report these
+ // results in a useful and convenient way, and also how to emulate
+ // them with prior servers.
+ private bool
+}
+
+// BulkError holds an error returned from running a Bulk operation.
+// Individual errors may be obtained and inspected via the Cases method.
+type BulkError struct {
+ ecases []BulkErrorCase
+}
+
+func (e *BulkError) Error() string {
+ if len(e.ecases) == 0 {
+ return "invalid BulkError instance: no errors"
+ }
+ if len(e.ecases) == 1 {
+ return e.ecases[0].Err.Error()
+ }
+ msgs := make([]string, 0, len(e.ecases))
+ seen := make(map[string]bool)
+ for _, ecase := range e.ecases {
+ msg := ecase.Err.Error()
+ if !seen[msg] {
+ seen[msg] = true
+ msgs = append(msgs, msg)
+ }
+ }
+ if len(msgs) == 1 {
+ return msgs[0]
+ }
+ var buf bytes.Buffer
+ buf.WriteString("multiple errors in bulk operation:\n")
+ for _, msg := range msgs {
+ buf.WriteString(" - ")
+ buf.WriteString(msg)
+ buf.WriteByte('\n')
+ }
+ return buf.String()
+}
+
+type bulkErrorCases []BulkErrorCase
+
+func (slice bulkErrorCases) Len() int { return len(slice) }
+func (slice bulkErrorCases) Less(i, j int) bool { return slice[i].Index < slice[j].Index }
+func (slice bulkErrorCases) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] }
+
+// BulkErrorCase holds an individual error found while attempting a single change
+// within a bulk operation, and the position in which it was enqueued.
+//
+// MongoDB servers older than version 2.6 do not have proper support for bulk
+// operations, so the driver attempts to map its API as much as possible into
+// the functionality that works. In particular, only the last error is reported
+// for bulk inserts and without any positional information, so the Index
+// field is set to -1 in these cases.
+type BulkErrorCase struct {
+ Index int // Position of operation that failed, or -1 if unknown.
+ Err error
+}
+
+// Cases returns all individual errors found while attempting the requested changes.
+//
+// See the documentation of BulkErrorCase for limitations in older MongoDB releases.
+func (e *BulkError) Cases() []BulkErrorCase {
+ return e.ecases
+}
+
+// Bulk returns a value to prepare the execution of a bulk operation.
+func (c *Collection) Bulk() *Bulk {
+ return &Bulk{c: c, ordered: true}
+}
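+
+// A minimal usage sketch (illustrative only; the collection, documents and
+// selectors below are hypothetical):
+//
+//     bulk := coll.Bulk()
+//     bulk.Insert(bson.M{"n": 1}, bson.M{"n": 2})
+//     bulk.Update(bson.M{"n": 1}, bson.M{"$set": bson.M{"n": 10}})
+//     result, err := bulk.Run()
+//     if berr, ok := err.(*mgo.BulkError); ok {
+//         for _, ecase := range berr.Cases() {
+//             // ecase.Index is -1 for inserts on servers older than 2.6.
+//             log.Println(ecase.Index, ecase.Err)
+//         }
+//     }
+//     _ = result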
+
+// Unordered puts the bulk operation in unordered mode.
+//
+// In unordered mode the individual operations may be sent
+// out of order, which means later operations may proceed
+// even if prior ones have failed.
+func (b *Bulk) Unordered() {
+ b.ordered = false
+}
+
+func (b *Bulk) action(op bulkOp, opcount int) *bulkAction {
+ var action *bulkAction
+ if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op {
+ action = &b.actions[len(b.actions)-1]
+ } else if !b.ordered {
+ for i := range b.actions {
+ if b.actions[i].op == op {
+ action = &b.actions[i]
+ break
+ }
+ }
+ }
+ if action == nil {
+ b.actions = append(b.actions, bulkAction{op: op})
+ action = &b.actions[len(b.actions)-1]
+ }
+ for i := 0; i < opcount; i++ {
+ action.idxs = append(action.idxs, b.opcount)
+ b.opcount++
+ }
+ return action
+}
+
+// Insert queues up the provided documents for insertion.
+func (b *Bulk) Insert(docs ...interface{}) {
+ action := b.action(bulkInsert, len(docs))
+ action.docs = append(action.docs, docs...)
+}
+
+// Remove queues up the provided selectors for removing matching documents.
+// Each selector will remove only a single matching document.
+func (b *Bulk) Remove(selectors ...interface{}) {
+ action := b.action(bulkRemove, len(selectors))
+ for _, selector := range selectors {
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &deleteOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Flags: 1,
+ Limit: 1,
+ })
+ }
+}
+
+// RemoveAll queues up the provided selectors for removing all matching documents.
+// Each selector will remove all matching documents.
+func (b *Bulk) RemoveAll(selectors ...interface{}) {
+ action := b.action(bulkRemove, len(selectors))
+ for _, selector := range selectors {
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &deleteOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Flags: 0,
+ Limit: 0,
+ })
+ }
+}
+
+// Update queues up the provided pairs of updating instructions.
+// The first element of each pair selects the document to be
+// updated, and the second element defines how to update it.
+// Each pair updates at most one matching document.
+func (b *Bulk) Update(pairs ...interface{}) {
+ if len(pairs)%2 != 0 {
+ panic("Bulk.Update requires an even number of parameters")
+ }
+ action := b.action(bulkUpdate, len(pairs)/2)
+ for i := 0; i < len(pairs); i += 2 {
+ selector := pairs[i]
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &updateOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Update: pairs[i+1],
+ })
+ }
+}
+
+// UpdateAll queues up the provided pairs of updating instructions.
+// The first element of each pair selects the documents to be
+// updated, and the second element defines how to update them.
+// Each pair updates all documents matching its selector.
+func (b *Bulk) UpdateAll(pairs ...interface{}) {
+ if len(pairs)%2 != 0 {
+ panic("Bulk.UpdateAll requires an even number of parameters")
+ }
+ action := b.action(bulkUpdate, len(pairs)/2)
+ for i := 0; i < len(pairs); i += 2 {
+ selector := pairs[i]
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &updateOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Update: pairs[i+1],
+ Flags: 2,
+ Multi: true,
+ })
+ }
+}
+
+// Upsert queues up the provided pairs of upserting instructions.
+// The first element of each pair selects the document to be
+// updated, and the second element defines how to update it; if no
+// document matches the selector, a new one is inserted.
+// Each pair updates at most one matching document.
+func (b *Bulk) Upsert(pairs ...interface{}) {
+ if len(pairs)%2 != 0 {
+ panic("Bulk.Update requires an even number of parameters")
+ }
+ action := b.action(bulkUpdate, len(pairs)/2)
+ for i := 0; i < len(pairs); i += 2 {
+ selector := pairs[i]
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &updateOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Update: pairs[i+1],
+ Flags: 1,
+ Upsert: true,
+ })
+ }
+}
+
+// Run runs all the operations queued up.
+//
+// If an error is reported on an unordered bulk operation, the error value may
+// be an aggregation of all issues observed. As an exception to that, Insert
+// operations running on MongoDB versions prior to 2.6 will report only the
+// last error, due to a limitation in the wire protocol.
+func (b *Bulk) Run() (*BulkResult, error) {
+ var result BulkResult
+ var berr BulkError
+ var failed bool
+ for i := range b.actions {
+ action := &b.actions[i]
+ var ok bool
+ switch action.op {
+ case bulkInsert:
+ ok = b.runInsert(action, &result, &berr)
+ case bulkUpdate:
+ ok = b.runUpdate(action, &result, &berr)
+ case bulkRemove:
+ ok = b.runRemove(action, &result, &berr)
+ default:
+ panic("unknown bulk operation")
+ }
+ if !ok {
+ failed = true
+ if b.ordered {
+ break
+ }
+ }
+ }
+ if failed {
+ sort.Sort(bulkErrorCases(berr.ecases))
+ return nil, &berr
+ }
+ return &result, nil
+}
+
+func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *BulkError) bool {
+ op := &insertOp{b.c.FullName, action.docs, 0}
+ if !b.ordered {
+ op.flags = 1 // ContinueOnError
+ }
+ lerr, err := b.c.writeOp(op, b.ordered)
+ return b.checkSuccess(action, berr, lerr, err)
+}
+
+func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *BulkError) bool {
+ lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered)
+ if lerr != nil {
+ result.Matched += lerr.N
+ result.Modified += lerr.modified
+ }
+ return b.checkSuccess(action, berr, lerr, err)
+}
+
+func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *BulkError) bool {
+ lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered)
+ if lerr != nil {
+ result.Matched += lerr.N
+ result.Modified += lerr.modified
+ }
+ return b.checkSuccess(action, berr, lerr, err)
+}
+
+func (b *Bulk) checkSuccess(action *bulkAction, berr *BulkError, lerr *LastError, err error) bool {
+ if lerr != nil && len(lerr.ecases) > 0 {
+ for i := 0; i < len(lerr.ecases); i++ {
+ // Map back from the local error index into the visible one.
+ ecase := lerr.ecases[i]
+ idx := ecase.Index
+ if idx >= 0 {
+ idx = action.idxs[idx]
+ }
+ berr.ecases = append(berr.ecases, BulkErrorCase{idx, ecase.Err})
+ }
+ return false
+ } else if err != nil {
+ for i := 0; i < len(action.idxs); i++ {
+ berr.ecases = append(berr.ecases, BulkErrorCase{action.idxs[i], err})
+ }
+ return false
+ }
+ return true
+}
diff --git a/vendor/gopkg.in/mgo.v2/cluster.go b/vendor/gopkg.in/mgo.v2/cluster.go
new file mode 100644
index 0000000..c3bf8b0
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/cluster.go
@@ -0,0 +1,682 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "gopkg.in/mgo.v2/bson"
+)
+
+// ---------------------------------------------------------------------------
+// Mongo cluster encapsulation.
+//
+// A cluster enables communication with one or more servers participating
+// in a mongo cluster. This works with individual servers, a replica set,
+// a replica pair, one or multiple mongos routers, etc.
+
+type mongoCluster struct {
+ sync.RWMutex
+ serverSynced sync.Cond
+ userSeeds []string
+ dynaSeeds []string
+ servers mongoServers
+ masters mongoServers
+ references int
+ syncing bool
+ direct bool
+ failFast bool
+ syncCount uint
+ setName string
+ cachedIndex map[string]bool
+ sync chan bool
+ dial dialer
+}
+
+func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster {
+ cluster := &mongoCluster{
+ userSeeds: userSeeds,
+ references: 1,
+ direct: direct,
+ failFast: failFast,
+ dial: dial,
+ setName: setName,
+ }
+ cluster.serverSynced.L = cluster.RWMutex.RLocker()
+ cluster.sync = make(chan bool, 1)
+ stats.cluster(+1)
+ go cluster.syncServersLoop()
+ return cluster
+}
+
+// Acquire increases the reference count for the cluster.
+func (cluster *mongoCluster) Acquire() {
+ cluster.Lock()
+ cluster.references++
+ debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references)
+ cluster.Unlock()
+}
+
+// Release decreases the reference count for the cluster. Once
+// it reaches zero, all servers will be closed.
+func (cluster *mongoCluster) Release() {
+ cluster.Lock()
+ if cluster.references == 0 {
+ panic("cluster.Release() with references == 0")
+ }
+ cluster.references--
+ debugf("Cluster %p released (refs=%d)", cluster, cluster.references)
+ if cluster.references == 0 {
+ for _, server := range cluster.servers.Slice() {
+ server.Close()
+ }
+ // Wake up the sync loop so it can die.
+ cluster.syncServers()
+ stats.cluster(-1)
+ }
+ cluster.Unlock()
+}
+
+func (cluster *mongoCluster) LiveServers() (servers []string) {
+ cluster.RLock()
+ for _, serv := range cluster.servers.Slice() {
+ servers = append(servers, serv.Addr)
+ }
+ cluster.RUnlock()
+ return servers
+}
+
+func (cluster *mongoCluster) removeServer(server *mongoServer) {
+ cluster.Lock()
+ cluster.masters.Remove(server)
+ other := cluster.servers.Remove(server)
+ cluster.Unlock()
+ if other != nil {
+ other.Close()
+ log("Removed server ", server.Addr, " from cluster.")
+ }
+ server.Close()
+}
+
+type isMasterResult struct {
+ IsMaster bool
+ Secondary bool
+ Primary string
+ Hosts []string
+ Passives []string
+ Tags bson.D
+ Msg string
+ SetName string `bson:"setName"`
+ MaxWireVersion int `bson:"maxWireVersion"`
+}
+
+func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error {
+ // Monotonic mode lets it talk to a slave and still hold the socket.
+ session := newSession(Monotonic, cluster, 10*time.Second)
+ session.setSocket(socket)
+ err := session.Run("ismaster", result)
+ session.Close()
+ return err
+}
+
+type possibleTimeout interface {
+ Timeout() bool
+}
+
+var syncSocketTimeout = 5 * time.Second
+
+func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) {
+ var syncTimeout time.Duration
+ if raceDetector {
+ // This variable is only ever touched by tests.
+ globalMutex.Lock()
+ syncTimeout = syncSocketTimeout
+ globalMutex.Unlock()
+ } else {
+ syncTimeout = syncSocketTimeout
+ }
+
+ addr := server.Addr
+ log("SYNC Processing ", addr, "...")
+
+ // Retry a few times to avoid knocking a server down for a hiccup.
+ var result isMasterResult
+ var tryerr error
+ for retry := 0; ; retry++ {
+ if retry == 3 || retry == 1 && cluster.failFast {
+ return nil, nil, tryerr
+ }
+ if retry > 0 {
+ // Don't abuse the server needlessly if there's something actually wrong.
+ if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() {
+ // Give a chance for waiters to timeout as well.
+ cluster.serverSynced.Broadcast()
+ }
+ time.Sleep(syncShortDelay)
+ }
+
+ // It's not clear what would be a good timeout here. Is it
+ // better to wait longer or to retry?
+ socket, _, err := server.AcquireSocket(0, syncTimeout)
+ if err != nil {
+ tryerr = err
+ logf("SYNC Failed to get socket to %s: %v", addr, err)
+ continue
+ }
+ err = cluster.isMaster(socket, &result)
+ socket.Release()
+ if err != nil {
+ tryerr = err
+ logf("SYNC Command 'ismaster' to %s failed: %v", addr, err)
+ continue
+ }
+ debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result)
+ break
+ }
+
+ if cluster.setName != "" && result.SetName != cluster.setName {
+ logf("SYNC Server %s is not a member of replica set %q", addr, cluster.setName)
+ return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.setName)
+ }
+
+ if result.IsMaster {
+ debugf("SYNC %s is a master.", addr)
+ if !server.info.Master {
+ // Made an incorrect assumption above, so fix stats.
+ stats.conn(-1, false)
+ stats.conn(+1, true)
+ }
+ } else if result.Secondary {
+ debugf("SYNC %s is a slave.", addr)
+ } else if cluster.direct {
+ logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr)
+ } else {
+ logf("SYNC %s is neither a master nor a slave.", addr)
+ // Let stats track it as whatever was known before.
+ return nil, nil, errors.New(addr + " is neither a master nor a slave")
+ }
+
+ info = &mongoServerInfo{
+ Master: result.IsMaster,
+ Mongos: result.Msg == "isdbgrid",
+ Tags: result.Tags,
+ SetName: result.SetName,
+ MaxWireVersion: result.MaxWireVersion,
+ }
+
+ hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives))
+ if result.Primary != "" {
+ // First in the list to speed up master discovery.
+ hosts = append(hosts, result.Primary)
+ }
+ hosts = append(hosts, result.Hosts...)
+ hosts = append(hosts, result.Passives...)
+
+ debugf("SYNC %s knows about the following peers: %#v", addr, hosts)
+ return info, hosts, nil
+}
+
+type syncKind bool
+
+const (
+ completeSync syncKind = true
+ partialSync syncKind = false
+)
+
+func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) {
+ cluster.Lock()
+ current := cluster.servers.Search(server.ResolvedAddr)
+ if current == nil {
+ if syncKind == partialSync {
+ cluster.Unlock()
+ server.Close()
+ log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.")
+ return
+ }
+ cluster.servers.Add(server)
+ if info.Master {
+ cluster.masters.Add(server)
+ log("SYNC Adding ", server.Addr, " to cluster as a master.")
+ } else {
+ log("SYNC Adding ", server.Addr, " to cluster as a slave.")
+ }
+ } else {
+ if server != current {
+ panic("addServer attempting to add duplicated server")
+ }
+ if server.Info().Master != info.Master {
+ if info.Master {
+ log("SYNC Server ", server.Addr, " is now a master.")
+ cluster.masters.Add(server)
+ } else {
+ log("SYNC Server ", server.Addr, " is now a slave.")
+ cluster.masters.Remove(server)
+ }
+ }
+ }
+ server.SetInfo(info)
+ debugf("SYNC Broadcasting availability of server %s", server.Addr)
+ cluster.serverSynced.Broadcast()
+ cluster.Unlock()
+}
+
+func (cluster *mongoCluster) getKnownAddrs() []string {
+ cluster.RLock()
+ max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len()
+ seen := make(map[string]bool, max)
+ known := make([]string, 0, max)
+
+ add := func(addr string) {
+ if _, found := seen[addr]; !found {
+ seen[addr] = true
+ known = append(known, addr)
+ }
+ }
+
+ for _, addr := range cluster.userSeeds {
+ add(addr)
+ }
+ for _, addr := range cluster.dynaSeeds {
+ add(addr)
+ }
+ for _, serv := range cluster.servers.Slice() {
+ add(serv.Addr)
+ }
+ cluster.RUnlock()
+
+ return known
+}
+
+// syncServers injects a value into the cluster.sync channel to force
+// an iteration of the syncServersLoop function.
+func (cluster *mongoCluster) syncServers() {
+ select {
+ case cluster.sync <- true:
+ default:
+ }
+}
+
+// How long to wait for a checkup of the cluster topology if nothing
+// else kicks a synchronization before that.
+const syncServersDelay = 30 * time.Second
+const syncShortDelay = 500 * time.Millisecond
+
+// syncServersLoop loops while the cluster is alive to keep its idea of
+// the server topology up-to-date. It must be called just once from
+// newCluster. The loop iterates once syncServersDelay has passed, or
+// if somebody injects a value into the cluster.sync channel to force a
+// synchronization. A loop iteration will contact all servers in
+// parallel, ask them about known peers and their own role within the
+// cluster, and then attempt to do the same with all the peers
+// retrieved.
+func (cluster *mongoCluster) syncServersLoop() {
+ for {
+ debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster)
+
+ cluster.Lock()
+ if cluster.references == 0 {
+ cluster.Unlock()
+ break
+ }
+ cluster.references++ // Keep alive while syncing.
+ direct := cluster.direct
+ cluster.Unlock()
+
+ cluster.syncServersIteration(direct)
+
+ // We just synchronized, so consume any outstanding requests.
+ select {
+ case <-cluster.sync:
+ default:
+ }
+
+ cluster.Release()
+
+ // Hold off before allowing another sync. No point in
+ // burning CPU looking for down servers.
+ if !cluster.failFast {
+ time.Sleep(syncShortDelay)
+ }
+
+ cluster.Lock()
+ if cluster.references == 0 {
+ cluster.Unlock()
+ break
+ }
+ cluster.syncCount++
+ // Poke all waiters so they have a chance to timeout or
+ // restart syncing if they wish to.
+ cluster.serverSynced.Broadcast()
+ // Check if we have to restart immediately either way.
+ restart := !direct && cluster.masters.Empty() || cluster.servers.Empty()
+ cluster.Unlock()
+
+ if restart {
+ log("SYNC No masters found. Will synchronize again.")
+ time.Sleep(syncShortDelay)
+ continue
+ }
+
+ debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster)
+
+ // Hold off until somebody explicitly requests a synchronization
+ // or it's time to check for a cluster topology change again.
+ select {
+ case <-cluster.sync:
+ case <-time.After(syncServersDelay):
+ }
+ }
+ debugf("SYNC Cluster %p is stopping its sync loop.", cluster)
+}
+
+func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer {
+ cluster.RLock()
+ server := cluster.servers.Search(tcpaddr.String())
+ cluster.RUnlock()
+ if server != nil {
+ return server
+ }
+ return newServer(addr, tcpaddr, cluster.sync, cluster.dial)
+}
+
+func resolveAddr(addr string) (*net.TCPAddr, error) {
+ // Simple cases that do not need actual resolution. Works with IPv4 and v6.
+ if host, port, err := net.SplitHostPort(addr); err == nil {
+ if port, _ := strconv.Atoi(port); port > 0 {
+ zone := ""
+ if i := strings.LastIndex(host, "%"); i >= 0 {
+ zone = host[i+1:]
+ host = host[:i]
+ }
+ ip := net.ParseIP(host)
+ if ip != nil {
+ return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil
+ }
+ }
+ }
+
+ // Attempt to resolve IPv4 and v6 concurrently.
+ addrChan := make(chan *net.TCPAddr, 2)
+ for _, network := range []string{"udp4", "udp6"} {
+ network := network
+ go func() {
+ // The unfortunate UDP dialing hack allows having a timeout on address resolution.
+ conn, err := net.DialTimeout(network, addr, 10*time.Second)
+ if err != nil {
+ addrChan <- nil
+ } else {
+ addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr))
+ conn.Close()
+ }
+ }()
+ }
+
+ // Wait for the result of IPv4 and v6 resolution. Use IPv4 if available.
+ tcpaddr := <-addrChan
+ if tcpaddr == nil || len(tcpaddr.IP) != 4 {
+ var timeout <-chan time.Time
+ if tcpaddr != nil {
+ // Don't wait too long if an IPv6 address is known.
+ timeout = time.After(50 * time.Millisecond)
+ }
+ select {
+ case <-timeout:
+ case tcpaddr2 := <-addrChan:
+ if tcpaddr == nil || tcpaddr2 != nil {
+ // It's an IPv4 address or the only known address. Use it.
+ tcpaddr = tcpaddr2
+ }
+ }
+ }
+
+ if tcpaddr == nil {
+ log("SYNC Failed to resolve server address: ", addr)
+ return nil, errors.New("failed to resolve server address: " + addr)
+ }
+ if tcpaddr.String() != addr {
+ debug("SYNC Address ", addr, " resolved as ", tcpaddr.String())
+ }
+ return tcpaddr, nil
+}
+
+type pendingAdd struct {
+ server *mongoServer
+ info *mongoServerInfo
+}
+
+func (cluster *mongoCluster) syncServersIteration(direct bool) {
+ log("SYNC Starting full topology synchronization...")
+
+ var wg sync.WaitGroup
+ var m sync.Mutex
+ notYetAdded := make(map[string]pendingAdd)
+ addIfFound := make(map[string]bool)
+ seen := make(map[string]bool)
+ syncKind := partialSync
+
+ var spawnSync func(addr string, byMaster bool)
+ spawnSync = func(addr string, byMaster bool) {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ tcpaddr, err := resolveAddr(addr)
+ if err != nil {
+ log("SYNC Failed to start sync of ", addr, ": ", err.Error())
+ return
+ }
+ resolvedAddr := tcpaddr.String()
+
+ m.Lock()
+ if byMaster {
+ if pending, ok := notYetAdded[resolvedAddr]; ok {
+ delete(notYetAdded, resolvedAddr)
+ m.Unlock()
+ cluster.addServer(pending.server, pending.info, completeSync)
+ return
+ }
+ addIfFound[resolvedAddr] = true
+ }
+ if seen[resolvedAddr] {
+ m.Unlock()
+ return
+ }
+ seen[resolvedAddr] = true
+ m.Unlock()
+
+ server := cluster.server(addr, tcpaddr)
+ info, hosts, err := cluster.syncServer(server)
+ if err != nil {
+ cluster.removeServer(server)
+ return
+ }
+
+ m.Lock()
+ add := direct || info.Master || addIfFound[resolvedAddr]
+ if add {
+ syncKind = completeSync
+ } else {
+ notYetAdded[resolvedAddr] = pendingAdd{server, info}
+ }
+ m.Unlock()
+ if add {
+ cluster.addServer(server, info, completeSync)
+ }
+ if !direct {
+ for _, addr := range hosts {
+ spawnSync(addr, info.Master)
+ }
+ }
+ }()
+ }
+
+ knownAddrs := cluster.getKnownAddrs()
+ for _, addr := range knownAddrs {
+ spawnSync(addr, false)
+ }
+ wg.Wait()
+
+ if syncKind == completeSync {
+ logf("SYNC Synchronization was complete (got data from primary).")
+ for _, pending := range notYetAdded {
+ cluster.removeServer(pending.server)
+ }
+ } else {
+ logf("SYNC Synchronization was partial (cannot talk to primary).")
+ for _, pending := range notYetAdded {
+ cluster.addServer(pending.server, pending.info, partialSync)
+ }
+ }
+
+ cluster.Lock()
+ mastersLen := cluster.masters.Len()
+ logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen)
+
+ // Update dynamic seeds, but only if we have any good servers. Otherwise,
+ // leave them alone for better chances of a successful sync in the future.
+ if syncKind == completeSync {
+ dynaSeeds := make([]string, cluster.servers.Len())
+ for i, server := range cluster.servers.Slice() {
+ dynaSeeds[i] = server.Addr
+ }
+ cluster.dynaSeeds = dynaSeeds
+ debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds)
+ }
+ cluster.Unlock()
+}
+
+// AcquireSocket returns a socket to a server in the cluster. If slaveOk is
+// true, it will attempt to return a socket to a slave server. If it is
+// false, the socket will necessarily be to a master server.
+func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) {
+ var started time.Time
+ var syncCount uint
+ warnedLimit := false
+ for {
+ cluster.RLock()
+ for {
+ mastersLen := cluster.masters.Len()
+ slavesLen := cluster.servers.Len() - mastersLen
+ debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen)
+ if mastersLen > 0 && !(slaveOk && mode == Secondary) || slavesLen > 0 && slaveOk {
+ break
+ }
+ if mastersLen > 0 && mode == Secondary && cluster.masters.HasMongos() {
+ break
+ }
+ if started.IsZero() {
+ // Initialize after fast path above.
+ started = time.Now()
+ syncCount = cluster.syncCount
+ } else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount {
+ cluster.RUnlock()
+ return nil, errors.New("no reachable servers")
+ }
+ log("Waiting for servers to synchronize...")
+ cluster.syncServers()
+
+ // Remember: this will release and reacquire the lock.
+ cluster.serverSynced.Wait()
+ }
+
+ var server *mongoServer
+ if slaveOk {
+ server = cluster.servers.BestFit(mode, serverTags)
+ } else {
+ server = cluster.masters.BestFit(mode, nil)
+ }
+ cluster.RUnlock()
+
+ if server == nil {
+ // Must have failed the requested tags. Sleep to avoid spinning.
+ time.Sleep(100 * time.Millisecond)
+ continue
+ }
+
+ s, abended, err := server.AcquireSocket(poolLimit, socketTimeout)
+ if err == errPoolLimit {
+ if !warnedLimit {
+ warnedLimit = true
+ log("WARNING: Per-server connection limit reached.")
+ }
+ time.Sleep(100 * time.Millisecond)
+ continue
+ }
+ if err != nil {
+ cluster.removeServer(server)
+ cluster.syncServers()
+ continue
+ }
+ if abended && !slaveOk {
+ var result isMasterResult
+ err := cluster.isMaster(s, &result)
+ if err != nil || !result.IsMaster {
+ logf("Cannot confirm server %s as master (%v)", server.Addr, err)
+ s.Release()
+ cluster.syncServers()
+ time.Sleep(100 * time.Millisecond)
+ continue
+ }
+ }
+ return s, nil
+ }
+ panic("unreached")
+}
+
+func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) {
+ cluster.Lock()
+ if cluster.cachedIndex == nil {
+ cluster.cachedIndex = make(map[string]bool)
+ }
+ if exists {
+ cluster.cachedIndex[cacheKey] = true
+ } else {
+ delete(cluster.cachedIndex, cacheKey)
+ }
+ cluster.Unlock()
+}
+
+func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) {
+ cluster.RLock()
+ if cluster.cachedIndex != nil {
+ result = cluster.cachedIndex[cacheKey]
+ }
+ cluster.RUnlock()
+ return
+}
+
+func (cluster *mongoCluster) ResetIndexCache() {
+ cluster.Lock()
+ cluster.cachedIndex = make(map[string]bool)
+ cluster.Unlock()
+}
diff --git a/vendor/gopkg.in/mgo.v2/doc.go b/vendor/gopkg.in/mgo.v2/doc.go
new file mode 100644
index 0000000..859fd9b
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/doc.go
@@ -0,0 +1,31 @@
+// Package mgo offers a rich MongoDB driver for Go.
+//
+// Details about the mgo project (pronounced as "mango") can be found
+// on its web page:
+//
+// http://labix.org/mgo
+//
+// Usage of the driver revolves around the concept of sessions. To
+// get started, obtain a session using the Dial function:
+//
+// session, err := mgo.Dial(url)
+//
+// This will establish one or more connections with the cluster of
+// servers defined by the url parameter. From then on, the cluster
+// may be queried with multiple consistency rules (see SetMode) and
+// documents retrieved with statements such as:
+//
+// c := session.DB(database).C(collection)
+// err := c.Find(query).One(&result)
+//
+// New sessions are typically created by calling session.Copy on the
+// initial session obtained at dial time. These new sessions will share
+// the same cluster information and connection pool, and may be easily
+// handed into other methods and functions for organizing logic.
+// Every session created must have its Close method called at the end
+// of its lifetime, so its resources may be put back in the pool or
+// collected, depending on the case.
+//
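+// A rough sketch of that pattern (database, collection and document names
+// here are illustrative):
+//
+//     s := session.Copy()
+//     defer s.Close()
+//     c := s.DB("test").C("people")
+//     err = c.Insert(bson.M{"name": "Ale"})
+//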
+// For more details, see the documentation for the types and methods.
+//
+package mgo
diff --git a/vendor/gopkg.in/mgo.v2/gridfs.go b/vendor/gopkg.in/mgo.v2/gridfs.go
new file mode 100644
index 0000000..4214720
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/gridfs.go
@@ -0,0 +1,761 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "errors"
+ "hash"
+ "io"
+ "os"
+ "sync"
+ "time"
+
+ "gopkg.in/mgo.v2/bson"
+)
+
+type GridFS struct {
+ Files *Collection
+ Chunks *Collection
+}
+
+type gfsFileMode int
+
+const (
+ gfsClosed gfsFileMode = 0
+ gfsReading gfsFileMode = 1
+ gfsWriting gfsFileMode = 2
+)
+
+type GridFile struct {
+ m sync.Mutex
+ c sync.Cond
+ gfs *GridFS
+ mode gfsFileMode
+ err error
+
+ chunk int
+ offset int64
+
+ wpending int
+ wbuf []byte
+ wsum hash.Hash
+
+ rbuf []byte
+ rcache *gfsCachedChunk
+
+ doc gfsFile
+}
+
+type gfsFile struct {
+ Id interface{} "_id"
+ ChunkSize int "chunkSize"
+ UploadDate time.Time "uploadDate"
+ Length int64 ",minsize"
+ MD5 string
+ Filename string ",omitempty"
+ ContentType string "contentType,omitempty"
+ Metadata *bson.Raw ",omitempty"
+}
+
+type gfsChunk struct {
+ Id interface{} "_id"
+ FilesId interface{} "files_id"
+ N int
+ Data []byte
+}
+
+type gfsCachedChunk struct {
+ wait sync.Mutex
+ n int
+ data []byte
+ err error
+}
+
+func newGridFS(db *Database, prefix string) *GridFS {
+ return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")}
+}
+
+func (gfs *GridFS) newFile() *GridFile {
+ file := &GridFile{gfs: gfs}
+ file.c.L = &file.m
+ //runtime.SetFinalizer(file, finalizeFile)
+ return file
+}
+
+func finalizeFile(file *GridFile) {
+ file.Close()
+}
+
+// Create creates a new file with the provided name in the GridFS. If the file
+// name already exists, a new version will be inserted with an up-to-date
+// uploadDate that will cause it to be atomically visible to the Open and
+// OpenId methods. If the file name is not important, an empty name may be
+// provided and the file Id used instead.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// A simple example inserting a new file:
+//
+// func check(err error) {
+// if err != nil {
+// panic(err)
+// }
+// }
+// file, err := db.GridFS("fs").Create("myfile.txt")
+// check(err)
+// n, err := file.Write([]byte("Hello world!"))
+// check(err)
+// err = file.Close()
+// check(err)
+// fmt.Printf("%d bytes written\n", n)
+//
+// The io.Writer interface is implemented by *GridFile and may be used to
+// help with the file creation. For example:
+//
+// file, err := db.GridFS("fs").Create("myfile.txt")
+// check(err)
+// messages, err := os.Open("/var/log/messages")
+// check(err)
+// defer messages.Close()
+// _, err = io.Copy(file, messages)
+// check(err)
+// err = file.Close()
+// check(err)
+//
+func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
+ file = gfs.newFile()
+ file.mode = gfsWriting
+ file.wsum = md5.New()
+ file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name}
+ return
+}
+
+// OpenId returns the file with the provided id, for reading.
+// If the file isn't found, err will be set to mgo.ErrNotFound.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// The following example will print the first 8192 bytes from the file:
+//
+// func check(err error) {
+// if err != nil {
+// panic(err)
+// }
+// }
+// file, err := db.GridFS("fs").OpenId(objid)
+// check(err)
+// b := make([]byte, 8192)
+// n, err := file.Read(b)
+// check(err)
+// fmt.Println(string(b))
+// check(err)
+// err = file.Close()
+// check(err)
+// fmt.Printf("%d bytes read\n", n)
+//
+// The io.Reader interface is implemented by *GridFile and may be used to
+// read its contents. As an example, the following snippet will dump the whole
+// file into the standard output:
+//
+// file, err := db.GridFS("fs").OpenId(objid)
+// check(err)
+// _, err = io.Copy(os.Stdout, file)
+// check(err)
+// err = file.Close()
+// check(err)
+//
+func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) {
+ var doc gfsFile
+ err = gfs.Files.Find(bson.M{"_id": id}).One(&doc)
+ if err != nil {
+ return
+ }
+ file = gfs.newFile()
+ file.mode = gfsReading
+ file.doc = doc
+ return
+}
+
+// Open returns the most recently uploaded file with the provided
+// name, for reading. If the file isn't found, err will be set
+// to mgo.ErrNotFound.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// The following example will print the first 8192 bytes from the file:
+//
+// file, err := db.GridFS("fs").Open("myfile.txt")
+// check(err)
+// b := make([]byte, 8192)
+// n, err := file.Read(b)
+// check(err)
+// fmt.Println(string(b))
+// check(err)
+// err = file.Close()
+// check(err)
+// fmt.Printf("%d bytes read\n", n)
+//
+// The io.Reader interface is implemented by *GridFile and may be used to
+// read its contents. As an example, the following snippet will dump the whole
+// file into the standard output:
+//
+// file, err := db.GridFS("fs").Open("myfile.txt")
+// check(err)
+// _, err = io.Copy(os.Stdout, file)
+// check(err)
+// err = file.Close()
+// check(err)
+//
+func (gfs *GridFS) Open(name string) (file *GridFile, err error) {
+ var doc gfsFile
+ err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc)
+ if err != nil {
+ return
+ }
+ file = gfs.newFile()
+ file.mode = gfsReading
+ file.doc = doc
+ return
+}
+
+// OpenNext opens the next file from iter for reading, sets *file to it,
+// and returns true on success. If no more documents are available
+// on iter or an error occurred, *file is set to nil and the result is false.
+// Errors will be available via iter.Err().
+//
+// The iter parameter must be an iterator on the GridFS files collection.
+// Using the GridFS.Find method is an easy way to obtain such an iterator,
+// but any iterator on the collection will work.
+//
+// If the provided *file is non-nil, OpenNext will close it before attempting
+// to iterate to the next element. This means that in a loop one only
+// has to worry about closing files when breaking out of the loop early
+// (break, return, or panic).
+//
+// For example:
+//
+// gfs := db.GridFS("fs")
+// query := gfs.Find(nil).Sort("filename")
+// iter := query.Iter()
+// var f *mgo.GridFile
+// for gfs.OpenNext(iter, &f) {
+// fmt.Printf("Filename: %s\n", f.Name())
+// }
+// if iter.Close() != nil {
+// panic(iter.Close())
+// }
+//
+func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool {
+ if *file != nil {
+ // Ignoring the error here shouldn't be a big deal
+ // as we're reading the file and the loop iteration
+ // for this file is finished.
+ _ = (*file).Close()
+ }
+ var doc gfsFile
+ if !iter.Next(&doc) {
+ *file = nil
+ return false
+ }
+ f := gfs.newFile()
+ f.mode = gfsReading
+ f.doc = doc
+ *file = f
+ return true
+}
+
+// Find runs query on GridFS's files collection and returns
+// the resulting Query.
+//
+// This logic:
+//
+// gfs := db.GridFS("fs")
+// iter := gfs.Find(nil).Iter()
+//
+// Is equivalent to:
+//
+// files := db.C("fs" + ".files")
+// iter := files.Find(nil).Iter()
+//
+func (gfs *GridFS) Find(query interface{}) *Query {
+ return gfs.Files.Find(query)
+}
+
+// RemoveId deletes the file with the provided id from the GridFS.
+func (gfs *GridFS) RemoveId(id interface{}) error {
+ err := gfs.Files.Remove(bson.M{"_id": id})
+ if err != nil {
+ return err
+ }
+ _, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}})
+ return err
+}
+
+type gfsDocId struct {
+ Id interface{} "_id"
+}
+
+// Remove deletes all files with the provided name from the GridFS.
+func (gfs *GridFS) Remove(name string) (err error) {
+ iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter()
+ var doc gfsDocId
+ for iter.Next(&doc) {
+ if e := gfs.RemoveId(doc.Id); e != nil {
+ err = e
+ }
+ }
+ if err == nil {
+ err = iter.Close()
+ }
+ return err
+}
+
+func (file *GridFile) assertMode(mode gfsFileMode) {
+ switch file.mode {
+ case mode:
+ return
+ case gfsWriting:
+ panic("GridFile is open for writing")
+ case gfsReading:
+ panic("GridFile is open for reading")
+ case gfsClosed:
+ panic("GridFile is closed")
+ default:
+ panic("internal error: missing GridFile mode")
+ }
+}
+
+// SetChunkSize sets the size of saved chunks. Once the file is written to,
+// it will be split into blocks of that size and each block saved into an
+// independent chunk document. The default chunk size is 255kb.
+//
+// It is a runtime error to call this function once the file has started
+// being written to.
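+//
+// For example, a minimal sketch (added here for illustration; the file
+// name is a placeholder):
+//
+//     file, err := db.GridFS("fs").Create("big.bin")
+//     check(err)
+//     file.SetChunkSize(1024 * 1024) // must be set before the first Write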
+func (file *GridFile) SetChunkSize(bytes int) {
+ file.assertMode(gfsWriting)
+ debugf("GridFile %p: setting chunk size to %d", file, bytes)
+ file.m.Lock()
+ file.doc.ChunkSize = bytes
+ file.m.Unlock()
+}
+
+// Id returns the current file Id.
+func (file *GridFile) Id() interface{} {
+ return file.doc.Id
+}
+
+// SetId changes the current file Id.
+//
+// It is a runtime error to call this function once the file has started
+// being written to, or when the file is not open for writing.
+func (file *GridFile) SetId(id interface{}) {
+ file.assertMode(gfsWriting)
+ file.m.Lock()
+ file.doc.Id = id
+ file.m.Unlock()
+}
+
+// Name returns the optional file name. An empty string will be returned
+// in case it is unset.
+func (file *GridFile) Name() string {
+ return file.doc.Filename
+}
+
+// SetName changes the optional file name. An empty string may be used to
+// unset it.
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetName(name string) {
+ file.assertMode(gfsWriting)
+ file.m.Lock()
+ file.doc.Filename = name
+ file.m.Unlock()
+}
+
+// ContentType returns the optional file content type. An empty string will be
+// returned in case it is unset.
+func (file *GridFile) ContentType() string {
+ return file.doc.ContentType
+}
+
+// SetContentType changes the optional file content type. An empty string may be
+// used to unset it.
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetContentType(ctype string) {
+ file.assertMode(gfsWriting)
+ file.m.Lock()
+ file.doc.ContentType = ctype
+ file.m.Unlock()
+}
+
+// GetMeta unmarshals the optional "metadata" field associated with the
+// file into the result parameter. The meaning of keys under that field
+// is user-defined. For example:
+//
+// result := struct{ INode int }{}
+// err = file.GetMeta(&result)
+// if err != nil {
+// panic(err)
+// }
+// fmt.Printf("inode: %d\n", result.INode)
+//
+func (file *GridFile) GetMeta(result interface{}) (err error) {
+ file.m.Lock()
+ if file.doc.Metadata != nil {
+ err = bson.Unmarshal(file.doc.Metadata.Data, result)
+ }
+ file.m.Unlock()
+ return
+}
+
+// SetMeta changes the optional "metadata" field associated with the
+// file. The meaning of keys under that field is user-defined.
+// For example:
+//
+// file.SetMeta(bson.M{"inode": inode})
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetMeta(metadata interface{}) {
+ file.assertMode(gfsWriting)
+ data, err := bson.Marshal(metadata)
+ file.m.Lock()
+ if err != nil && file.err == nil {
+ file.err = err
+ } else {
+ file.doc.Metadata = &bson.Raw{Data: data}
+ }
+ file.m.Unlock()
+}
+
+// Size returns the file size in bytes.
+func (file *GridFile) Size() (bytes int64) {
+ file.m.Lock()
+ bytes = file.doc.Length
+ file.m.Unlock()
+ return
+}
+
+// MD5 returns the file MD5 as a hex-encoded string.
+func (file *GridFile) MD5() (md5 string) {
+ return file.doc.MD5
+}
+
+// UploadDate returns the file upload time.
+func (file *GridFile) UploadDate() time.Time {
+ return file.doc.UploadDate
+}
+
+// SetUploadDate changes the file upload time.
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetUploadDate(t time.Time) {
+ file.assertMode(gfsWriting)
+ file.m.Lock()
+ file.doc.UploadDate = t
+ file.m.Unlock()
+}
+
+// Close flushes any pending changes in case the file is being written
+// to, waits for any background operations to finish, and closes the file.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+func (file *GridFile) Close() (err error) {
+ file.m.Lock()
+ defer file.m.Unlock()
+ if file.mode == gfsWriting {
+ if len(file.wbuf) > 0 && file.err == nil {
+ file.insertChunk(file.wbuf)
+ file.wbuf = file.wbuf[0:0]
+ }
+ file.completeWrite()
+ } else if file.mode == gfsReading && file.rcache != nil {
+ file.rcache.wait.Lock()
+ file.rcache = nil
+ }
+ file.mode = gfsClosed
+ debugf("GridFile %p: closed", file)
+ return file.err
+}
+
+func (file *GridFile) completeWrite() {
+ for file.wpending > 0 {
+ debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending)
+ file.c.Wait()
+ }
+ if file.err == nil {
+ hexsum := hex.EncodeToString(file.wsum.Sum(nil))
+ if file.doc.UploadDate.IsZero() {
+ file.doc.UploadDate = bson.Now()
+ }
+ file.doc.MD5 = hexsum
+ file.err = file.gfs.Files.Insert(file.doc)
+ }
+ if file.err != nil {
+ file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}})
+ }
+ if file.err == nil {
+ index := Index{
+ Key: []string{"files_id", "n"},
+ Unique: true,
+ }
+ file.err = file.gfs.Chunks.EnsureIndex(index)
+ }
+}
+
+// Abort cancels an in-progress write, preventing the file from being
+// automatically created and ensuring previously written chunks are
+// removed when the file is closed.
+//
+// It is a runtime error to call Abort when the file was not opened
+// for writing.
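+//
+// For example (added here for illustration; src is a placeholder reader):
+//
+//     file, err := db.GridFS("fs").Create("upload.bin")
+//     check(err)
+//     if _, err = io.Copy(file, src); err != nil {
+//         file.Abort() // Close will then discard the partial upload
+//     }
+//     file.Close()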
+func (file *GridFile) Abort() {
+ if file.mode != gfsWriting {
+ panic("file.Abort must be called on file opened for writing")
+ }
+ file.err = errors.New("write aborted")
+}
+
+// Write writes the provided data to the file and returns the
+// number of bytes written and an error if something went wrong.
+//
+// The file will internally cache the data so that all but the last
+// chunk sent to the database have the size defined by SetChunkSize.
+// This also means that errors may be deferred until a future call
+// to Write or Close.
+//
+// The parameters and behavior of this function turn the file
+// into an io.Writer.
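+//
+// Because errors may be deferred, the error returned by Close must still
+// be checked after a successful sequence of writes. A minimal sketch
+// (added here for illustration; blocks is a placeholder):
+//
+//     file, err := db.GridFS("fs").Create("data.bin")
+//     check(err)
+//     for _, block := range blocks {
+//         _, err = file.Write(block)
+//         check(err)
+//     }
+//     err = file.Close()
+//     check(err)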
+func (file *GridFile) Write(data []byte) (n int, err error) {
+ file.assertMode(gfsWriting)
+ file.m.Lock()
+ debugf("GridFile %p: writing %d bytes", file, len(data))
+ defer file.m.Unlock()
+
+ if file.err != nil {
+ return 0, file.err
+ }
+
+ n = len(data)
+ file.doc.Length += int64(n)
+ chunkSize := file.doc.ChunkSize
+
+ if len(file.wbuf)+len(data) < chunkSize {
+ file.wbuf = append(file.wbuf, data...)
+ return
+ }
+
+ // First, flush file.wbuf complementing with data.
+ if len(file.wbuf) > 0 {
+ missing := chunkSize - len(file.wbuf)
+ if missing > len(data) {
+ missing = len(data)
+ }
+ file.wbuf = append(file.wbuf, data[:missing]...)
+ data = data[missing:]
+ file.insertChunk(file.wbuf)
+ file.wbuf = file.wbuf[0:0]
+ }
+
+ // Then, flush all chunks from data without copying.
+ for len(data) > chunkSize {
+ size := chunkSize
+ if size > len(data) {
+ size = len(data)
+ }
+ file.insertChunk(data[:size])
+ data = data[size:]
+ }
+
+ // And append the rest for a future call.
+ file.wbuf = append(file.wbuf, data...)
+
+ return n, file.err
+}
+
+func (file *GridFile) insertChunk(data []byte) {
+ n := file.chunk
+ file.chunk++
+ debugf("GridFile %p: adding to checksum: %q", file, string(data))
+ file.wsum.Write(data)
+
+ for file.doc.ChunkSize*file.wpending >= 1024*1024 {
+		// Hold on... we have a megabyte of writes pending.
+ file.c.Wait()
+ if file.err != nil {
+ return
+ }
+ }
+
+ file.wpending++
+
+ debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))
+
+ // We may not own the memory of data, so rather than
+ // simply copying it, we'll marshal the document ahead of time.
+ data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
+ if err != nil {
+ file.err = err
+ return
+ }
+
+ go func() {
+ err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
+ file.m.Lock()
+ file.wpending--
+ if err != nil && file.err == nil {
+ file.err = err
+ }
+ file.c.Broadcast()
+ file.m.Unlock()
+ }()
+}
+
+// Seek sets the offset for the next Read or Write on file to
+// offset, interpreted according to whence: 0 means relative to
+// the origin of the file, 1 means relative to the current offset,
+// and 2 means relative to the end. It returns the new offset and
+// an error, if any.
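+//
+// For example, the following sketch (added here for illustration) skips a
+// fixed-size header before dumping the rest of the file:
+//
+//     file, err := db.GridFS("fs").Open("myfile.bin")
+//     check(err)
+//     _, err = file.Seek(512, os.SEEK_SET)
+//     check(err)
+//     _, err = io.Copy(os.Stdout, file)
+//     check(err)
+//     err = file.Close()
+//     check(err)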
+func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
+ file.m.Lock()
+	debugf("GridFile %p: seeking to %d (whence=%d)", file, offset, whence)
+ defer file.m.Unlock()
+ switch whence {
+ case os.SEEK_SET:
+ case os.SEEK_CUR:
+ offset += file.offset
+ case os.SEEK_END:
+ offset += file.doc.Length
+ default:
+ panic("unsupported whence value")
+ }
+ if offset > file.doc.Length {
+ return file.offset, errors.New("seek past end of file")
+ }
+ if offset == file.doc.Length {
+ // If we're seeking to the end of the file,
+ // no need to read anything. This enables
+ // a client to find the size of the file using only the
+ // io.ReadSeeker interface with low overhead.
+ file.offset = offset
+ return file.offset, nil
+ }
+ chunk := int(offset / int64(file.doc.ChunkSize))
+ if chunk+1 == file.chunk && offset >= file.offset {
+ file.rbuf = file.rbuf[int(offset-file.offset):]
+ file.offset = offset
+ return file.offset, nil
+ }
+ file.offset = offset
+ file.chunk = chunk
+ file.rbuf = nil
+ file.rbuf, err = file.getChunk()
+ if err == nil {
+ file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):]
+ }
+ return file.offset, err
+}
+
+// Read reads into b the next available data from the file and
+// returns the number of bytes read and an error if something
+// went wrong. At the end of the file, n will
+// be zero and err will be set to io.EOF.
+//
+// The parameters and behavior of this function turn the file
+// into an io.Reader.
+func (file *GridFile) Read(b []byte) (n int, err error) {
+ file.assertMode(gfsReading)
+ file.m.Lock()
+ debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b))
+ defer file.m.Unlock()
+ if file.offset == file.doc.Length {
+ return 0, io.EOF
+ }
+ for err == nil {
+ i := copy(b, file.rbuf)
+ n += i
+ file.offset += int64(i)
+ file.rbuf = file.rbuf[i:]
+ if i == len(b) || file.offset == file.doc.Length {
+ break
+ }
+ b = b[i:]
+ file.rbuf, err = file.getChunk()
+ }
+ return n, err
+}
+
+func (file *GridFile) getChunk() (data []byte, err error) {
+ cache := file.rcache
+ file.rcache = nil
+ if cache != nil && cache.n == file.chunk {
+ debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk)
+ cache.wait.Lock()
+ data, err = cache.data, cache.err
+ } else {
+ debugf("GridFile %p: Fetching chunk %d", file, file.chunk)
+ var doc gfsChunk
+ err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc)
+ data = doc.Data
+ }
+ file.chunk++
+ if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length {
+ // Read the next one in background.
+ cache = &gfsCachedChunk{n: file.chunk}
+ cache.wait.Lock()
+ debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk)
+ // Clone the session to avoid having it closed in between.
+ chunks := file.gfs.Chunks
+ session := chunks.Database.Session.Clone()
+ go func(id interface{}, n int) {
+ defer session.Close()
+ chunks = chunks.With(session)
+ var doc gfsChunk
+ cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc)
+ cache.data = doc.Data
+ cache.wait.Unlock()
+ }(file.doc.Id, file.chunk)
+ file.rcache = cache
+ }
+ debugf("Returning err: %#v", err)
+ return
+}
diff --git a/vendor/gopkg.in/mgo.v2/internal/json/LICENSE b/vendor/gopkg.in/mgo.v2/internal/json/LICENSE
new file mode 100644
index 0000000..7448756
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/internal/json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/mgo.v2/internal/json/decode.go b/vendor/gopkg.in/mgo.v2/internal/json/decode.go
new file mode 100644
index 0000000..ce7c7d2
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/internal/json/decode.go
@@ -0,0 +1,1685 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match.
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a map, Unmarshal first establishes a map to
+// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
+// reuses the existing map, keeping existing entries. Unmarshal then stores key-
+// value pairs from the JSON object into the map. The map's key type must
+// either be a string or implement encoding.TextUnmarshaler.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
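+// A minimal sketch of typical usage (added here for illustration; it simply
+// exercises the rules above):
+//
+//     var animal struct {
+//         Name  string
+//         Order string
+//     }
+//     data := []byte(`{"Name": "Platypus", "Order": "Monotremata"}`)
+//     if err := Unmarshal(data, &animal); err != nil {
+//         // handle the error
+//     }
+//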
+func Unmarshal(data []byte, v interface{}) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by types
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
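+//
+// A minimal sketch of an implementation (added here for illustration): a
+// type that accepts a JSON string holding a number:
+//
+//     type Celsius float64
+//
+//     func (c *Celsius) UnmarshalJSON(data []byte) error {
+//         s, err := strconv.Unquote(string(data))
+//         if err != nil {
+//             return err
+//         }
+//         f, err := strconv.ParseFloat(s, 64)
+//         if err != nil {
+//             return err
+//         }
+//         *c = Celsius(f)
+//         return nil
+//     }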
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ d.value(rv)
+ return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+ // This function implements the JSON numbers grammar.
+ // See https://tools.ietf.org/html/rfc7159#section-6
+ // and http://json.org/number.gif
+
+ if s == "" {
+ return false
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+
+ // Digits
+ switch {
+ default:
+ return false
+
+ case s[0] == '0':
+ s = s[1:]
+
+ case '1' <= s[0] && s[0] <= '9':
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // . followed by 1 or more digits.
+ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // e or E followed by an optional - or + and
+ // 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ s = s[1:]
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // Make sure we are at the end.
+ return s == ""
+}
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // read offset in data
+ scan scanner
+ nextscan scanner // for calls to nextValue
+ savedError error
+ useNumber bool
+ ext Extension
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+ panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = err
+ }
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+ c := d.data[d.off]
+ item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // Our scanner has seen the opening brace/bracket
+ // and thinks we're still in the middle of the object.
+ // invent a closing brace/bracket to get it out.
+ if c == '{' {
+ d.scan.step(&d.scan, '}')
+ } else if c == '[' {
+ d.scan.step(&d.scan, ']')
+ } else {
+ // Was inside a function name. Get out of it.
+ d.scan.step(&d.scan, '(')
+ d.scan.step(&d.scan, ')')
+ }
+
+ return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+ var newOp int
+ for {
+ if d.off >= len(d.data) {
+ newOp = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ } else {
+ c := d.data[d.off]
+ d.off++
+ newOp = d.scan.step(&d.scan, c)
+ }
+ if newOp != op {
+ break
+ }
+ }
+ return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// it updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+ if !v.IsValid() {
+ _, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // d.scan thinks we're still at the beginning of the item.
+ // Feed in an empty string - the shortest, simplest value -
+ // so that it knows we got to the end of the value.
+ if d.scan.redo {
+ // rewind.
+ d.scan.redo = false
+ d.scan.step = stateBeginValue
+ }
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+
+ n := len(d.scan.parseState)
+ if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+ // d.scan thinks we just read an object key; finish the object
+ d.scan.step(&d.scan, ':')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '}')
+ }
+
+ return
+ }
+
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(v)
+
+ case scanBeginObject:
+ d.object(v)
+
+ case scanBeginLiteral:
+ d.literal(v)
+
+ case scanBeginName:
+ d.name(v)
+ }
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(reflect.Value{})
+
+ case scanBeginObject:
+ d.object(reflect.Value{})
+
+ case scanBeginName:
+ switch v := d.nameInterface().(type) {
+ case nil, string:
+ return v
+ }
+
+ case scanBeginLiteral:
+ switch v := d.literalInterface().(type) {
+ case nil, string:
+ return v
+ }
+ }
+ return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, v
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, v
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ }
+
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ v.Set(reflect.ValueOf(d.arrayInterface()))
+ return
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ case reflect.Array:
+ case reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ d.value(v.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.value(reflect.Value{})
+ }
+ i++
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+}
+
+var nullLiteral = []byte("null")
+var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if d.storeKeyed(pv) {
+ return
+ }
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(d.objectInterface()))
+ return
+ }
+
+ // Check type of target:
+ // struct or
+ // map[string]T or map[encoding.TextUnmarshaler]T
+ switch v.Kind() {
+ case reflect.Map:
+ // Map key must either have string kind or be an encoding.TextUnmarshaler.
+ t := v.Type()
+ if t.Key().Kind() != reflect.String &&
+ !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ var mapElem reflect.Value
+
+ empty := true
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ if !empty && !d.ext.trailingCommas {
+ d.syntaxError("beginning of object key string")
+ }
+ break
+ }
+ empty = false
+ if op == scanBeginName {
+ if !d.ext.unquotedKeys {
+ d.syntaxError("beginning of object key string")
+ }
+ } else if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+ unquotedKey := op == scanBeginName
+
+ // Read key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ var key []byte
+ if unquotedKey {
+ key = item
+ // TODO Fix code below to quote item when necessary.
+ } else {
+ var ok bool
+ key, ok = unquoteBytes(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, key) {
+ f = ff
+ break
+ }
+ if f == nil && ff.equalFold(ff.nameBytes, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ d.literalStore(nullLiteral, subv, false)
+ case string:
+ d.literalStore([]byte(qv), subv, true)
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kt := v.Type().Key()
+ var kv reflect.Value
+ switch {
+ case kt.Kind() == reflect.String:
+ kv = reflect.ValueOf(key).Convert(v.Type().Key())
+ case reflect.PtrTo(kt).Implements(textUnmarshalerType):
+ kv = reflect.New(v.Type().Key())
+ d.literalStore(item, kv, true)
+ kv = kv.Elem()
+ default:
+ panic("json: Unexpected key type") // should never occur
+ }
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+}
+
+// isNull returns whether there's a null literal at the provided offset.
+func (d *decodeState) isNull(off int) bool {
+ if off+4 >= len(d.data) || d.data[off] != 'n' || d.data[off+1] != 'u' || d.data[off+2] != 'l' || d.data[off+3] != 'l' {
+ return false
+ }
+ d.nextscan.reset()
+ for i, c := range d.data[off:] {
+ if i > 4 {
+ return false
+ }
+ switch d.nextscan.step(&d.nextscan, c) {
+ case scanContinue, scanBeginName:
+ continue
+ }
+ break
+ }
+ return true
+}
+
+// name consumes a const or function from d.data[d.off-1:], decoding into the value v.
+// the first byte of the function name has been read already.
+func (d *decodeState) name(v reflect.Value) {
+ if d.isNull(d.off-1) {
+ d.literal(v)
+ return
+ }
+
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if d.storeKeyed(pv) {
+ return
+ }
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over function in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ out := d.nameInterface()
+ if out == nil {
+ v.Set(reflect.Zero(v.Type()))
+ } else {
+ v.Set(reflect.ValueOf(out))
+ }
+ return
+ }
+
+ nameStart := d.off - 1
+
+ op := d.scanWhile(scanContinue)
+
+ name := d.data[nameStart : d.off-1]
+ if op != scanParam {
+ // Back up so the byte just read is consumed next.
+ d.off--
+ d.scan.undo(op)
+ if l, ok := d.convertLiteral(name); ok {
+ d.storeValue(v, l)
+ return
+ }
+ d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)})
+ }
+
+ funcName := string(name)
+ funcData := d.ext.funcs[funcName]
+ if funcData.key == "" {
+ d.error(fmt.Errorf("json: unknown function %q", funcName))
+ }
+
+ // Check type of target:
+ // struct or
+ // map[string]T or map[encoding.TextUnmarshaler]T
+ switch v.Kind() {
+ case reflect.Map:
+ // Map key must either have string kind or be an encoding.TextUnmarshaler.
+ t := v.Type()
+ if t.Key().Kind() != reflect.String &&
+ !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ // TODO Fix case of func field as map.
+ //topv := v
+
+ // Figure out field corresponding to function.
+ key := []byte(funcData.key)
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ v = reflect.New(elemType).Elem()
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, key) {
+ f = ff
+ break
+ }
+ if f == nil && ff.equalFold(ff.nameBytes, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ for _, i := range f.index {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ v = v.Field(i)
+ }
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ }
+ }
+
+ // Check for unmarshaler on func field itself.
+ u, ut, pv = d.indirect(v, false)
+ if u != nil {
+ d.off = nameStart
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ var mapElem reflect.Value
+
+ // Parse function arguments.
+ for i := 0; ; i++ {
+ // closing ) - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndParams {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ if i >= len(funcData.args) {
+ d.error(fmt.Errorf("json: too many arguments for function %s", funcName))
+ }
+ key := []byte(funcData.args[i])
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, key) {
+ f = ff
+ break
+ }
+ if f == nil && ff.equalFold(ff.nameBytes, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read value.
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ d.literalStore(nullLiteral, subv, false)
+ case string:
+ d.literalStore([]byte(qv), subv, true)
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kt := v.Type().Key()
+ var kv reflect.Value
+ switch {
+ case kt.Kind() == reflect.String:
+ kv = reflect.ValueOf(key).Convert(v.Type().Key())
+ case reflect.PtrTo(kt).Implements(textUnmarshalerType):
+ kv = reflect.New(v.Type().Key())
+ d.literalStore(key, kv, true)
+ kv = kv.Elem()
+ default:
+ panic("json: Unexpected key type") // should never occur
+ }
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or ).
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndParams {
+ break
+ }
+ if op != scanParam {
+ d.error(errPhase)
+ }
+ }
+}
+
+// keyed attempts to decode an object or function using a keyed doc extension,
+// and returns the value and true on success, or nil and false otherwise.
+func (d *decodeState) keyed() (interface{}, bool) {
+ if len(d.ext.keyed) == 0 {
+ return nil, false
+ }
+
+ unquote := false
+
+ // Look-ahead first key to check for a keyed document extension.
+ d.nextscan.reset()
+ var start, end int
+ for i, c := range d.data[d.off-1:] {
+ switch op := d.nextscan.step(&d.nextscan, c); op {
+ case scanSkipSpace, scanContinue, scanBeginObject:
+ continue
+ case scanBeginLiteral, scanBeginName:
+ unquote = op == scanBeginLiteral
+ start = i
+ continue
+ }
+ end = i
+ break
+ }
+
+ name := d.data[d.off-1+start : d.off-1+end]
+
+ var key []byte
+ var ok bool
+ if unquote {
+ key, ok = unquoteBytes(name)
+ if !ok {
+ d.error(errPhase)
+ }
+ } else {
+ funcData, ok := d.ext.funcs[string(name)]
+ if !ok {
+ return nil, false
+ }
+ key = []byte(funcData.key)
+ }
+
+ decode, ok := d.ext.keyed[string(key)]
+ if !ok {
+ return nil, false
+ }
+
+ d.off--
+ out, err := decode(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return out, true
+}
+
+func (d *decodeState) storeKeyed(v reflect.Value) bool {
+ keyed, ok := d.keyed()
+ if !ok {
+ return false
+ }
+ d.storeValue(v, keyed)
+ return true
+}
+
+var (
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ nullBytes = []byte("null")
+)
+
+func (d *decodeState) storeValue(v reflect.Value, from interface{}) {
+ switch from {
+ case nil:
+ d.literalStore(nullBytes, v, false)
+ return
+ case true:
+ d.literalStore(trueBytes, v, false)
+ return
+ case false:
+ d.literalStore(falseBytes, v, false)
+ return
+ }
+ fromv := reflect.ValueOf(from)
+ for fromv.Kind() == reflect.Ptr && !fromv.IsNil() {
+ fromv = fromv.Elem()
+ }
+ fromt := fromv.Type()
+ for v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ vt := v.Type()
+ if fromt.AssignableTo(vt) {
+ v.Set(fromv)
+ } else if fromt.ConvertibleTo(vt) {
+ v.Set(fromv.Convert(vt))
+ } else {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ }
+}
+
+func (d *decodeState) convertLiteral(name []byte) (interface{}, bool) {
+ if len(name) == 0 {
+ return nil, false
+ }
+ switch name[0] {
+ case 't':
+ if bytes.Equal(name, trueBytes) {
+ return true, true
+ }
+ case 'f':
+ if bytes.Equal(name, falseBytes) {
+ return false, true
+ }
+ case 'n':
+ if bytes.Equal(name, nullBytes) {
+ return nil, true
+ }
+ }
+ if l, ok := d.ext.consts[string(name)]; ok {
+ return l, true
+ }
+ return nil, false
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64 or a Number
+// depending on the setting of d.useNumber.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ if d.useNumber {
+ return Number(s), nil
+ }
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+ return f, nil
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+ //Empty string given
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return
+ }
+ wantptr := item[0] == 'n' // null
+ u, ut, pv := d.indirect(v, wantptr)
+ if u != nil {
+ err := u.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ return
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ err := ut.UnmarshalText(s)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ case reflect.Slice:
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.SetBytes(b[:n])
+ case reflect.String:
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ v.SetString(s)
+ if !isValidNumber(s) {
+ d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+ }
+ break
+ }
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ }
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+ switch d.scanWhile(scanSkipSpace) {
+ default:
+ d.error(errPhase)
+ panic("unreachable")
+ case scanBeginArray:
+ return d.arrayInterface()
+ case scanBeginObject:
+ return d.objectInterface()
+ case scanBeginLiteral:
+ return d.literalInterface()
+ case scanBeginName:
+ return d.nameInterface()
+ }
+}
+
+func (d *decodeState) syntaxError(expected string) {
+ msg := fmt.Sprintf("invalid character '%c' looking for %s", d.data[d.off-1], expected)
+ d.error(&SyntaxError{msg, int64(d.off)})
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ if len(v) > 0 && !d.ext.trailingCommas {
+ d.syntaxError("beginning of value")
+ }
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() interface{} {
+ v, ok := d.keyed()
+ if ok {
+ return v
+ }
+
+ m := make(map[string]interface{})
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ if len(m) > 0 && !d.ext.trailingCommas {
+ d.syntaxError("beginning of object key string")
+ }
+ break
+ }
+ if op == scanBeginName {
+ if !d.ext.unquotedKeys {
+ d.syntaxError("beginning of object key string")
+ }
+ } else if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+ unquotedKey := op == scanBeginName
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ var key string
+ if unquotedKey {
+ key = string(item)
+ } else {
+ var ok bool
+ key, ok = unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
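+
+// For illustration only (not in the upstream source): with the unquotedKeys
+// extension enabled, objectInterface also accepts keys without quotes, e.g.
+//
+//	{port: 27017}
+//
+// which decodes to a map with the single key "port" and its value converted
+// by convertNumber; without the extension an unquoted key is reported as a
+// syntax error.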
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+ item := d.data[start:d.off]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ d.error(errPhase)
+ }
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+ }
+}
+
+// nameInterface is like name but returns map[string]interface{}.
+func (d *decodeState) nameInterface() interface{} {
+ v, ok := d.keyed()
+ if ok {
+ return v
+ }
+
+ nameStart := d.off - 1
+
+ op := d.scanWhile(scanContinue)
+
+ name := d.data[nameStart : d.off-1]
+ if op != scanParam {
+ // Back up so the byte just read is consumed next.
+ d.off--
+ d.scan.undo(op)
+ if l, ok := d.convertLiteral(name); ok {
+ return l
+ }
+ d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)})
+ }
+
+ funcName := string(name)
+ funcData := d.ext.funcs[funcName]
+ if funcData.key == "" {
+ d.error(fmt.Errorf("json: unknown function %q", funcName))
+ }
+
+ m := make(map[string]interface{})
+ for i := 0; ; i++ {
+ // Look ahead for ) - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndParams {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ if i >= len(funcData.args) {
+ d.error(fmt.Errorf("json: too many arguments for function %s", funcName))
+ }
+ m[funcData.args[i]] = d.valueInterface()
+
+ // Next token must be , or ).
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndParams {
+ break
+ }
+ if op != scanParam {
+ d.error(errPhase)
+ }
+ }
+ return map[string]interface{}{funcData.key: m}
+}
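+
+// Illustrative sketch (the function name and keys below are hypothetical and
+// depend on what the decoder's extension registers, not on this file): if the
+// extension maps the function "ObjectId" to key "$oid" with a single argument
+// named "Id", then the extended-JSON input
+//
+//	ObjectId("abc123")
+//
+// decodes through nameInterface into
+//
+//	map[string]interface{}{"$oid": map[string]interface{}{"Id": "abc123"}}
+//
+// A bare name with no parameter list is resolved through convertLiteral
+// instead, or rejected as an unknown constant.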
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
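+
+// For example (illustrative only), getu4([]byte(`\u00e9`)) returns the rune
+// U+00E9, while input shorter than six bytes or not of the form \uXXXX
+// yields -1.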
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules differ from Go's, so strconv.Unquote cannot be used.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote, control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
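+
+// As an illustrative sketch (not part of the upstream source), unquote handles
+// both simple escapes and UTF-16 surrogate pairs:
+//
+//	s, ok := unquote([]byte(`"tab\there \ud83d\ude00"`))
+//	// ok == true; s contains a real tab and the rune U+1F600 (😀)
+//
+// A lone surrogate is replaced with U+FFFD, while a malformed escape or a
+// missing closing quote makes ok report false.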
diff --git a/vendor/gopkg.in/mgo.v2/internal/json/encode.go b/vendor/gopkg.in/mgo.v2/internal/json/encode.go
new file mode 100644
index 0000000..67a0f00
--- /dev/null
+++ b/vendor/gopkg.in/mgo.v2/internal/json/encode.go
@@ -0,0 +1,1256 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON as defined in
+// RFC 4627. The mapping between JSON and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+// This escaping can be disabled using an Encoder with DisableHTMLEscaping.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON value.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects. The map's key type must either be a string
+// or implement encoding.TextMarshaler. The map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON value.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON value.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+ e := &encodeState{}
+ err := e.marshal(v, encOpts{escapeHTML: true})
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+}
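+
+// An illustrative sketch of the tag handling described above (the Account type
+// and its fields are hypothetical, not part of this package):
+//
+//	type Account struct {
+//		Name    string `json:"name"`
+//		Email   string `json:"email,omitempty"`
+//		Balance int64  `json:",string"`
+//		secret  string // unexported, never encoded
+//	}
+//
+//	b, err := Marshal(Account{Name: "ann", Balance: 42})
+//	// err == nil
+//	// string(b) == `{"name":"ann","Balance":"42"}`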
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ err = Indent(&buf, b, prefix, indent)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
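+
+// For example (illustrative only):
+//
+//	b, _ := MarshalIndent(map[string]int{"a": 1}, "", "  ")
+//	// string(b) == "{\n  \"a\": 1\n}"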
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML