vendor: k8s.io/kubernetes/pkg/client/clientset_generated/clientset@v1.6.1
This commit is contained in:
parent
f7ec2e78cf
commit
03e5f539dd
|
@ -0,0 +1,21 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 Alex Saskevich
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
|
@ -0,0 +1,421 @@
|
||||||
|
govalidator
|
||||||
|
===========
|
||||||
|
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043)
|
||||||
|
[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator)
|
||||||
|
|
||||||
|
A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
|
||||||
|
|
||||||
|
#### Installation
|
||||||
|
Make sure that Go is installed on your computer.
|
||||||
|
Type the following command in your terminal:
|
||||||
|
|
||||||
|
go get github.com/asaskevich/govalidator
|
||||||
|
|
||||||
|
or you can get specified release of the package with `gopkg.in`:
|
||||||
|
|
||||||
|
go get gopkg.in/asaskevich/govalidator.v4
|
||||||
|
|
||||||
|
After that, the package is ready to use.
|
||||||
|
|
||||||
|
|
||||||
|
#### Import package in your project
|
||||||
|
Add following line in your `*.go` file:
|
||||||
|
```go
|
||||||
|
import "github.com/asaskevich/govalidator"
|
||||||
|
```
|
||||||
|
If you are unhappy using the long `govalidator` name, you can alias the import like this:
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
valid "github.com/asaskevich/govalidator"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Activate behavior to require all fields have a validation tag by default
|
||||||
|
`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/asaskevich/govalidator"
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
govalidator.SetFieldsRequiredByDefault(true)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Here's some code to explain it:
|
||||||
|
```go
|
||||||
|
// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
|
||||||
|
type exampleStruct struct {
|
||||||
|
Name string ``
|
||||||
|
Email string `valid:"email"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// this, however, will only fail when Email is empty or an invalid email address:
|
||||||
|
type exampleStruct2 struct {
|
||||||
|
Name string `valid:"-"`
|
||||||
|
Email string `valid:"email"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// lastly, this will only fail when Email is an invalid email address but not when it's empty:
|
||||||
|
type exampleStruct2 struct {
|
||||||
|
Name string `valid:"-"`
|
||||||
|
Email string `valid:"email,optional"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
|
||||||
|
##### Custom validator function signature
|
||||||
|
A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible.
|
||||||
|
```go
|
||||||
|
import "github.com/asaskevich/govalidator"
|
||||||
|
|
||||||
|
// old signature
|
||||||
|
func(i interface{}) bool
|
||||||
|
|
||||||
|
// new signature
|
||||||
|
func(i interface{}, o interface{}) bool
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Adding a custom validator
|
||||||
|
This was changed to prevent data races when accessing custom validators.
|
||||||
|
```go
|
||||||
|
import "github.com/asaskevich/govalidator"
|
||||||
|
|
||||||
|
// before
|
||||||
|
govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool {
|
||||||
|
// ...
|
||||||
|
})
|
||||||
|
|
||||||
|
// after
|
||||||
|
govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool {
|
||||||
|
// ...
|
||||||
|
}))
|
||||||
|
```
|
||||||
|
|
||||||
|
#### List of functions:
|
||||||
|
```go
|
||||||
|
func Abs(value float64) float64
|
||||||
|
func BlackList(str, chars string) string
|
||||||
|
func ByteLength(str string, params ...string) bool
|
||||||
|
func CamelCaseToUnderscore(str string) string
|
||||||
|
func Contains(str, substring string) bool
|
||||||
|
func Count(array []interface{}, iterator ConditionIterator) int
|
||||||
|
func Each(array []interface{}, iterator Iterator)
|
||||||
|
func ErrorByField(e error, field string) string
|
||||||
|
func ErrorsByField(e error) map[string]string
|
||||||
|
func Filter(array []interface{}, iterator ConditionIterator) []interface{}
|
||||||
|
func Find(array []interface{}, iterator ConditionIterator) interface{}
|
||||||
|
func GetLine(s string, index int) (string, error)
|
||||||
|
func GetLines(s string) []string
|
||||||
|
func InRange(value, left, right float64) bool
|
||||||
|
func IsASCII(str string) bool
|
||||||
|
func IsAlpha(str string) bool
|
||||||
|
func IsAlphanumeric(str string) bool
|
||||||
|
func IsBase64(str string) bool
|
||||||
|
func IsByteLength(str string, min, max int) bool
|
||||||
|
func IsCIDR(str string) bool
|
||||||
|
func IsCreditCard(str string) bool
|
||||||
|
func IsDNSName(str string) bool
|
||||||
|
func IsDataURI(str string) bool
|
||||||
|
func IsDialString(str string) bool
|
||||||
|
func IsDivisibleBy(str, num string) bool
|
||||||
|
func IsEmail(str string) bool
|
||||||
|
func IsFilePath(str string) (bool, int)
|
||||||
|
func IsFloat(str string) bool
|
||||||
|
func IsFullWidth(str string) bool
|
||||||
|
func IsHalfWidth(str string) bool
|
||||||
|
func IsHexadecimal(str string) bool
|
||||||
|
func IsHexcolor(str string) bool
|
||||||
|
func IsHost(str string) bool
|
||||||
|
func IsIP(str string) bool
|
||||||
|
func IsIPv4(str string) bool
|
||||||
|
func IsIPv6(str string) bool
|
||||||
|
func IsISBN(str string, version int) bool
|
||||||
|
func IsISBN10(str string) bool
|
||||||
|
func IsISBN13(str string) bool
|
||||||
|
func IsISO3166Alpha2(str string) bool
|
||||||
|
func IsISO3166Alpha3(str string) bool
|
||||||
|
func IsISO4217(str string) bool
|
||||||
|
func IsIn(str string, params ...string) bool
|
||||||
|
func IsInt(str string) bool
|
||||||
|
func IsJSON(str string) bool
|
||||||
|
func IsLatitude(str string) bool
|
||||||
|
func IsLongitude(str string) bool
|
||||||
|
func IsLowerCase(str string) bool
|
||||||
|
func IsMAC(str string) bool
|
||||||
|
func IsMongoID(str string) bool
|
||||||
|
func IsMultibyte(str string) bool
|
||||||
|
func IsNatural(value float64) bool
|
||||||
|
func IsNegative(value float64) bool
|
||||||
|
func IsNonNegative(value float64) bool
|
||||||
|
func IsNonPositive(value float64) bool
|
||||||
|
func IsNull(str string) bool
|
||||||
|
func IsNumeric(str string) bool
|
||||||
|
func IsPort(str string) bool
|
||||||
|
func IsPositive(value float64) bool
|
||||||
|
func IsPrintableASCII(str string) bool
|
||||||
|
func IsRFC3339(str string) bool
|
||||||
|
func IsRGBcolor(str string) bool
|
||||||
|
func IsRequestURI(rawurl string) bool
|
||||||
|
func IsRequestURL(rawurl string) bool
|
||||||
|
func IsSSN(str string) bool
|
||||||
|
func IsSemver(str string) bool
|
||||||
|
func IsTime(str string, format string) bool
|
||||||
|
func IsURL(str string) bool
|
||||||
|
func IsUTFDigit(str string) bool
|
||||||
|
func IsUTFLetter(str string) bool
|
||||||
|
func IsUTFLetterNumeric(str string) bool
|
||||||
|
func IsUTFNumeric(str string) bool
|
||||||
|
func IsUUID(str string) bool
|
||||||
|
func IsUUIDv3(str string) bool
|
||||||
|
func IsUUIDv4(str string) bool
|
||||||
|
func IsUUIDv5(str string) bool
|
||||||
|
func IsUpperCase(str string) bool
|
||||||
|
func IsVariableWidth(str string) bool
|
||||||
|
func IsWhole(value float64) bool
|
||||||
|
func LeftTrim(str, chars string) string
|
||||||
|
func Map(array []interface{}, iterator ResultIterator) []interface{}
|
||||||
|
func Matches(str, pattern string) bool
|
||||||
|
func NormalizeEmail(str string) (string, error)
|
||||||
|
func PadBoth(str string, padStr string, padLen int) string
|
||||||
|
func PadLeft(str string, padStr string, padLen int) string
|
||||||
|
func PadRight(str string, padStr string, padLen int) string
|
||||||
|
func Range(str string, params ...string) bool
|
||||||
|
func RemoveTags(s string) string
|
||||||
|
func ReplacePattern(str, pattern, replace string) string
|
||||||
|
func Reverse(s string) string
|
||||||
|
func RightTrim(str, chars string) string
|
||||||
|
func RuneLength(str string, params ...string) bool
|
||||||
|
func SafeFileName(str string) string
|
||||||
|
func SetFieldsRequiredByDefault(value bool)
|
||||||
|
func Sign(value float64) float64
|
||||||
|
func StringLength(str string, params ...string) bool
|
||||||
|
func StringMatches(s string, params ...string) bool
|
||||||
|
func StripLow(str string, keepNewLines bool) string
|
||||||
|
func ToBoolean(str string) (bool, error)
|
||||||
|
func ToFloat(str string) (float64, error)
|
||||||
|
func ToInt(str string) (int64, error)
|
||||||
|
func ToJSON(obj interface{}) (string, error)
|
||||||
|
func ToString(obj interface{}) string
|
||||||
|
func Trim(str, chars string) string
|
||||||
|
func Truncate(str string, length int, ending string) string
|
||||||
|
func UnderscoreToCamelCase(s string) string
|
||||||
|
func ValidateStruct(s interface{}) (bool, error)
|
||||||
|
func WhiteList(str, chars string) string
|
||||||
|
type ConditionIterator
|
||||||
|
type CustomTypeValidator
|
||||||
|
type Error
|
||||||
|
func (e Error) Error() string
|
||||||
|
type Errors
|
||||||
|
func (es Errors) Error() string
|
||||||
|
func (es Errors) Errors() []error
|
||||||
|
type ISO3166Entry
|
||||||
|
type Iterator
|
||||||
|
type ParamValidator
|
||||||
|
type ResultIterator
|
||||||
|
type UnsupportedTypeError
|
||||||
|
func (e *UnsupportedTypeError) Error() string
|
||||||
|
type Validator
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Examples
|
||||||
|
###### IsURL
|
||||||
|
```go
|
||||||
|
println(govalidator.IsURL(`http://user:pass@domain.com/path/page`))
|
||||||
|
```
|
||||||
|
###### ToString
|
||||||
|
```go
|
||||||
|
type User struct {
|
||||||
|
FirstName string
|
||||||
|
LastName string
|
||||||
|
}
|
||||||
|
|
||||||
|
str := govalidator.ToString(&User{"John", "Juan"})
|
||||||
|
println(str)
|
||||||
|
```
|
||||||
|
###### Each, Map, Filter, Count for slices
|
||||||
|
Each iterates over the slice/array and calls Iterator for every item
|
||||||
|
```go
|
||||||
|
data := []interface{}{1, 2, 3, 4, 5}
|
||||||
|
var fn govalidator.Iterator = func(value interface{}, index int) {
|
||||||
|
println(value.(int))
|
||||||
|
}
|
||||||
|
govalidator.Each(data, fn)
|
||||||
|
```
|
||||||
|
```go
|
||||||
|
data := []interface{}{1, 2, 3, 4, 5}
|
||||||
|
var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
|
||||||
|
return value.(int) * 3
|
||||||
|
}
|
||||||
|
_ = govalidator.Map(data, fn) // result = []interface{}{3, 6, 9, 12, 15}
|
||||||
|
```
|
||||||
|
```go
|
||||||
|
data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
|
||||||
|
var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
|
||||||
|
return value.(int)%2 == 0
|
||||||
|
}
|
||||||
|
_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
|
||||||
|
_ = govalidator.Count(data, fn) // result = 5
|
||||||
|
```
|
||||||
|
###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
|
||||||
|
If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
|
||||||
|
```go
|
||||||
|
govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
|
||||||
|
return str == "duck"
|
||||||
|
})
|
||||||
|
```
|
||||||
|
For completely custom validators (interface-based), see below.
|
||||||
|
|
||||||
|
Here is a list of available validators for struct fields (validator - used function):
|
||||||
|
```go
|
||||||
|
"email": IsEmail,
|
||||||
|
"url": IsURL,
|
||||||
|
"dialstring": IsDialString,
|
||||||
|
"requrl": IsRequestURL,
|
||||||
|
"requri": IsRequestURI,
|
||||||
|
"alpha": IsAlpha,
|
||||||
|
"utfletter": IsUTFLetter,
|
||||||
|
"alphanum": IsAlphanumeric,
|
||||||
|
"utfletternum": IsUTFLetterNumeric,
|
||||||
|
"numeric": IsNumeric,
|
||||||
|
"utfnumeric": IsUTFNumeric,
|
||||||
|
"utfdigit": IsUTFDigit,
|
||||||
|
"hexadecimal": IsHexadecimal,
|
||||||
|
"hexcolor": IsHexcolor,
|
||||||
|
"rgbcolor": IsRGBcolor,
|
||||||
|
"lowercase": IsLowerCase,
|
||||||
|
"uppercase": IsUpperCase,
|
||||||
|
"int": IsInt,
|
||||||
|
"float": IsFloat,
|
||||||
|
"null": IsNull,
|
||||||
|
"uuid": IsUUID,
|
||||||
|
"uuidv3": IsUUIDv3,
|
||||||
|
"uuidv4": IsUUIDv4,
|
||||||
|
"uuidv5": IsUUIDv5,
|
||||||
|
"creditcard": IsCreditCard,
|
||||||
|
"isbn10": IsISBN10,
|
||||||
|
"isbn13": IsISBN13,
|
||||||
|
"json": IsJSON,
|
||||||
|
"multibyte": IsMultibyte,
|
||||||
|
"ascii": IsASCII,
|
||||||
|
"printableascii": IsPrintableASCII,
|
||||||
|
"fullwidth": IsFullWidth,
|
||||||
|
"halfwidth": IsHalfWidth,
|
||||||
|
"variablewidth": IsVariableWidth,
|
||||||
|
"base64": IsBase64,
|
||||||
|
"datauri": IsDataURI,
|
||||||
|
"ip": IsIP,
|
||||||
|
"port": IsPort,
|
||||||
|
"ipv4": IsIPv4,
|
||||||
|
"ipv6": IsIPv6,
|
||||||
|
"dns": IsDNSName,
|
||||||
|
"host": IsHost,
|
||||||
|
"mac": IsMAC,
|
||||||
|
"latitude": IsLatitude,
|
||||||
|
"longitude": IsLongitude,
|
||||||
|
"ssn": IsSSN,
|
||||||
|
"semver": IsSemver,
|
||||||
|
"rfc3339": IsRFC3339,
|
||||||
|
"ISO3166Alpha2": IsISO3166Alpha2,
|
||||||
|
"ISO3166Alpha3": IsISO3166Alpha3,
|
||||||
|
```
|
||||||
|
Validators with parameters
|
||||||
|
|
||||||
|
```go
|
||||||
|
"range(min|max)": Range,
|
||||||
|
"length(min|max)": ByteLength,
|
||||||
|
"runelength(min|max)": RuneLength,
|
||||||
|
"matches(pattern)": StringMatches,
|
||||||
|
"in(string1|string2|...|stringN)": IsIn,
|
||||||
|
```
|
||||||
|
|
||||||
|
And here is a small example of usage:
|
||||||
|
```go
|
||||||
|
type Post struct {
|
||||||
|
Title string `valid:"alphanum,required"`
|
||||||
|
Message string `valid:"duck,ascii"`
|
||||||
|
AuthorIP string `valid:"ipv4"`
|
||||||
|
Date string `valid:"-"`
|
||||||
|
}
|
||||||
|
post := &Post{
|
||||||
|
Title: "My Example Post",
|
||||||
|
Message: "duck",
|
||||||
|
AuthorIP: "123.234.54.3",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add your own struct validation tags
|
||||||
|
govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
|
||||||
|
return str == "duck"
|
||||||
|
})
|
||||||
|
|
||||||
|
result, err := govalidator.ValidateStruct(post)
|
||||||
|
if err != nil {
|
||||||
|
println("error: " + err.Error())
|
||||||
|
}
|
||||||
|
println(result)
|
||||||
|
```
|
||||||
|
###### WhiteList
|
||||||
|
```go
|
||||||
|
// Remove all characters from string ignoring characters between "a" and "z"
|
||||||
|
println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
|
||||||
|
```
|
||||||
|
|
||||||
|
###### Custom validation functions
|
||||||
|
Custom validation using your own domain specific validators is also available - here's an example of how to use it:
|
||||||
|
```go
|
||||||
|
import "github.com/asaskevich/govalidator"
|
||||||
|
|
||||||
|
type CustomByteArray [6]byte // custom types are supported and can be validated
|
||||||
|
|
||||||
|
type StructWithCustomByteArray struct {
|
||||||
|
ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
|
||||||
|
Email string `valid:"email"`
|
||||||
|
CustomMinLength int `valid:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
|
||||||
|
switch v := context.(type) { // you can type switch on the context interface being validated
|
||||||
|
case StructWithCustomByteArray:
|
||||||
|
// you can check and validate against some other field in the context,
|
||||||
|
// return early or not validate against the context at all – your choice
|
||||||
|
case SomeOtherType:
|
||||||
|
// ...
|
||||||
|
default:
|
||||||
|
// expecting some other type? Throw/panic here or continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v := i.(type) { // type switch on the struct field being validated
|
||||||
|
case CustomByteArray:
|
||||||
|
for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
|
||||||
|
if e != 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}))
|
||||||
|
govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
|
||||||
|
switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
|
||||||
|
case StructWithCustomByteArray:
|
||||||
|
return len(v.ID) >= v.CustomMinLength
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}))
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Notes
|
||||||
|
Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
|
||||||
|
Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
|
||||||
|
|
||||||
|
#### Support
|
||||||
|
If you do have a contribution for the package feel free to put up a Pull Request or open Issue.
|
||||||
|
|
||||||
|
#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
|
||||||
|
* [Daniel Lohse](https://github.com/annismckenzie)
|
||||||
|
* [Attila Oláh](https://github.com/attilaolah)
|
||||||
|
* [Daniel Korner](https://github.com/Dadie)
|
||||||
|
* [Steven Wilkin](https://github.com/stevenwilkin)
|
||||||
|
* [Deiwin Sarjas](https://github.com/deiwin)
|
||||||
|
* [Noah Shibley](https://github.com/slugmobile)
|
||||||
|
* [Nathan Davies](https://github.com/nathj07)
|
||||||
|
* [Matt Sanford](https://github.com/mzsanford)
|
||||||
|
* [Simon ccl1115](https://github.com/ccl1115)
|
|
@ -0,0 +1,58 @@
|
||||||
|
package govalidator
|
||||||
|
|
||||||
|
// Iterator is a callback invoked with an element of a slice/array and its index.
type Iterator func(interface{}, int)

// ResultIterator is a callback invoked with an element of a slice/array and its
// index that produces a result value.
type ResultIterator func(interface{}, int) interface{}

// ConditionIterator is a callback invoked with an element of a slice/array and
// its index that reports whether the element satisfies a condition.
type ConditionIterator func(interface{}, int) bool

// Each invokes iterator once for every element of array, in order.
func Each(array []interface{}, iterator Iterator) {
	for i := range array {
		iterator(array[i], i)
	}
}

// Map applies iterator to every element of array and returns a new slice
// holding the results, in the same order.
func Map(array []interface{}, iterator ResultIterator) []interface{} {
	mapped := make([]interface{}, len(array))
	for i := range array {
		mapped[i] = iterator(array[i], i)
	}
	return mapped
}

// Find returns the first element of array for which iterator reports true,
// or nil when no element matches.
func Find(array []interface{}, iterator ConditionIterator) interface{} {
	for i, item := range array {
		if iterator(item, i) {
			return item
		}
	}
	return nil
}

// Filter returns a new slice containing only the elements of array for which
// iterator reports true, preserving their relative order.
func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
	filtered := make([]interface{}, 0)
	for i, item := range array {
		if iterator(item, i) {
			filtered = append(filtered, item)
		}
	}
	return filtered
}

// Count reports how many elements of array satisfy iterator.
func Count(array []interface{}, iterator ConditionIterator) int {
	total := 0
	for i, item := range array {
		if iterator(item, i) {
			total++
		}
	}
	return total
}
|
|
@ -0,0 +1,45 @@
|
||||||
|
package govalidator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ToString converts the input to its default string representation,
// i.e. the result of formatting it with fmt's %v verb.
func ToString(obj interface{}) string {
	// fmt.Sprintf already returns a string; the original re-converted it
	// with string(res), which was a redundant no-op conversion.
	return fmt.Sprintf("%v", obj)
}
|
||||||
|
|
||||||
|
// ToJSON converts the input to its JSON string representation.
// On marshalling failure an empty string is returned together with the error.
func ToJSON(obj interface{}) (string, error) {
	encoded, err := json.Marshal(obj)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
|
||||||
|
|
||||||
|
// ToFloat converts the input string to a float64.
// On parse failure it returns 0.0 together with the parse error.
func ToFloat(str string) (float64, error) {
	parsed, err := strconv.ParseFloat(str, 64)
	if err != nil {
		return 0.0, err
	}
	return parsed, nil
}
|
||||||
|
|
||||||
|
// ToInt converts the input string to an int64.
// Base 0 is used, so "0x" / "0" prefixes select hex / octal.
// On parse failure it returns 0 together with the parse error.
func ToInt(str string) (int64, error) {
	parsed, err := strconv.ParseInt(str, 0, 64)
	if err != nil {
		return 0, err
	}
	return parsed, nil
}
|
||||||
|
|
||||||
|
// ToBoolean converts the input string to a bool using strconv.ParseBool
// semantics (accepts 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False).
func ToBoolean(str string) (bool, error) {
	value, err := strconv.ParseBool(str)
	return value, err
}
|
|
@ -0,0 +1,31 @@
|
||||||
|
package govalidator
|
||||||
|
|
||||||
|
// Errors is an array of multiple errors and conforms to the error interface.
|
||||||
|
type Errors []error
|
||||||
|
|
||||||
|
// Errors returns itself.
|
||||||
|
func (es Errors) Errors() []error {
|
||||||
|
return es
|
||||||
|
}
|
||||||
|
|
||||||
|
func (es Errors) Error() string {
|
||||||
|
var err string
|
||||||
|
for _, e := range es {
|
||||||
|
err += e.Error() + ";"
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error encapsulates a name, an error and whether there's a custom error message or not.
|
||||||
|
type Error struct {
|
||||||
|
Name string
|
||||||
|
Err error
|
||||||
|
CustomErrorMessageExists bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e Error) Error() string {
|
||||||
|
if e.CustomErrorMessageExists {
|
||||||
|
return e.Err.Error()
|
||||||
|
}
|
||||||
|
return e.Name + ": " + e.Err.Error()
|
||||||
|
}
|
|
@ -0,0 +1,57 @@
|
||||||
|
package govalidator
|
||||||
|
|
||||||
|
import "math"
|
||||||
|
|
||||||
|
// Abs returns the absolute value of number. It delegates to math.Abs,
// so Abs(NaN) = NaN and Abs(-0) = 0.
func Abs(value float64) float64 {
	return math.Abs(value)
}
|
||||||
|
|
||||||
|
// Sign returns the signum of number: 1 if value > 0, -1 if value < 0,
// and 0 otherwise. NaN compares false both ways, so Sign(NaN) is 0.
func Sign(value float64) float64 {
	// Guard-style returns replace the original if/else-if/else chain
	// (idiomatic Go avoids else after a terminating branch).
	if value > 0 {
		return 1
	}
	if value < 0 {
		return -1
	}
	return 0
}
|
||||||
|
|
||||||
|
// IsNegative returns true if value < 0 (false for NaN).
func IsNegative(value float64) bool {
	return value < 0
}

// IsPositive returns true if value > 0 (false for NaN).
func IsPositive(value float64) bool {
	return value > 0
}

// IsNonNegative returns true if value >= 0 (false for NaN).
func IsNonNegative(value float64) bool {
	return value >= 0
}

// IsNonPositive returns true if value <= 0 (false for NaN).
func IsNonPositive(value float64) bool {
	return value <= 0
}
|
||||||
|
|
||||||
|
// InRange reports whether value lies between left and right, inclusive.
// The borders may be given in either order.
func InRange(value, left, right float64) bool {
	lo, hi := left, right
	if lo > hi {
		lo, hi = hi, lo
	}
	return lo <= value && value <= hi
}
|
||||||
|
|
||||||
|
// IsWhole returns true if value is a whole number, i.e. its IEEE-754
// remainder when divided by 1 is exactly zero (false for NaN and ±Inf).
func IsWhole(value float64) bool {
	return math.Remainder(value, 1) == 0
}
|
||||||
|
|
||||||
|
// IsNatural returns true if value is a natural number: whole and
// strictly positive (zero is excluded).
func IsNatural(value float64) bool {
	return IsWhole(value) && IsPositive(value)
}
|
|
@ -0,0 +1,91 @@
|
||||||
|
package govalidator
|
||||||
|
|
||||||
|
import "regexp"
|
||||||
|
|
||||||
|
// Basic regular expressions for validating strings. Each constant is the
// source pattern for the corresponding compiled rx* variable below.
const (
	// Email matches RFC-5322-style addresses, including quoted local parts and non-ASCII ranges.
	Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
	// CreditCard matches Visa, MasterCard, Amex, Discover, Diners and JCB numbers.
	CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$"
	// ISBN10 matches a 10-digit ISBN (X allowed as the check digit).
	ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
	// ISBN13 matches a 13-digit ISBN.
	ISBN13 string = "^(?:[0-9]{13})$"
	// UUID3, UUID4, UUID5 match version-specific UUIDs; UUID matches any version.
	UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
	UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
	UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
	UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
	// Alpha, Alphanumeric and Numeric match ASCII letter/digit-only strings.
	Alpha string = "^[a-zA-Z]+$"
	Alphanumeric string = "^[a-zA-Z0-9]+$"
	Numeric string = "^[0-9]+$"
	// Int matches an optionally signed decimal integer; Float also allows fraction and exponent.
	Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
	Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
	// Hexadecimal, Hexcolor and RGBcolor match hex digit strings and CSS color notations.
	Hexadecimal string = "^[0-9a-fA-F]+$"
	Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
	RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
	// ASCII/Multibyte/FullWidth/HalfWidth classify by character width and byte range.
	ASCII string = "^[\x00-\x7F]+$"
	Multibyte string = "[^\x00-\x7F]"
	FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
	HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
	// Base64 matches padded base64 text.
	Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
	// PrintableASCII matches printable ASCII only (space through tilde).
	PrintableASCII string = "^[\x20-\x7E]+$"
	// DataURI matches a base64 data: URI header.
	DataURI string = "^data:.+\\/(.+);base64$"
	// Latitude and Longitude match signed decimal coordinates within range.
	Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
	Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
	// DNSName matches dot-separated DNS labels of up to 63 characters each.
	DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
	// IP matches IPv6 addresses (all abbreviation forms, zone IDs, and IPv4-mapped suffixes).
	IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
	// URL component patterns; URL below is assembled from them.
	URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
	URLUsername string = `(\S+(:\S*)?@)`
	Hostname string = ``
	URLPath string = `((\/|\?|#)[^\s]*)`
	URLPort string = `(:(\d{1,5}))`
	URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))`
	URLSubdomain string = `((www\.)|([a-zA-Z0-9]([-\.][-\._a-zA-Z0-9]+)*))`
	URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
	// SSN matches a US social security number with optional separators.
	SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
	// WinPath and UnixPath match absolute file-system paths per OS convention.
	WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
	UnixPath string = `^(/[^/\x00]*)+/?$`
	// Semver matches semantic-version strings, optionally prefixed with "v".
	Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
	// tagName is the struct-tag key read by the struct validators.
	tagName string = "valid"
)
|
||||||
|
|
||||||
|
// Used by IsFilePath func
|
||||||
|
const (
|
||||||
|
// Unknown is unresolved OS type
|
||||||
|
Unknown = iota
|
||||||
|
// Win is Windows type
|
||||||
|
Win
|
||||||
|
// Unix is *nix OS types
|
||||||
|
Unix
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
rxEmail = regexp.MustCompile(Email)
|
||||||
|
rxCreditCard = regexp.MustCompile(CreditCard)
|
||||||
|
rxISBN10 = regexp.MustCompile(ISBN10)
|
||||||
|
rxISBN13 = regexp.MustCompile(ISBN13)
|
||||||
|
rxUUID3 = regexp.MustCompile(UUID3)
|
||||||
|
rxUUID4 = regexp.MustCompile(UUID4)
|
||||||
|
rxUUID5 = regexp.MustCompile(UUID5)
|
||||||
|
rxUUID = regexp.MustCompile(UUID)
|
||||||
|
rxAlpha = regexp.MustCompile(Alpha)
|
||||||
|
rxAlphanumeric = regexp.MustCompile(Alphanumeric)
|
||||||
|
rxNumeric = regexp.MustCompile(Numeric)
|
||||||
|
rxInt = regexp.MustCompile(Int)
|
||||||
|
rxFloat = regexp.MustCompile(Float)
|
||||||
|
rxHexadecimal = regexp.MustCompile(Hexadecimal)
|
||||||
|
rxHexcolor = regexp.MustCompile(Hexcolor)
|
||||||
|
rxRGBcolor = regexp.MustCompile(RGBcolor)
|
||||||
|
rxASCII = regexp.MustCompile(ASCII)
|
||||||
|
rxPrintableASCII = regexp.MustCompile(PrintableASCII)
|
||||||
|
rxMultibyte = regexp.MustCompile(Multibyte)
|
||||||
|
rxFullWidth = regexp.MustCompile(FullWidth)
|
||||||
|
rxHalfWidth = regexp.MustCompile(HalfWidth)
|
||||||
|
rxBase64 = regexp.MustCompile(Base64)
|
||||||
|
rxDataURI = regexp.MustCompile(DataURI)
|
||||||
|
rxLatitude = regexp.MustCompile(Latitude)
|
||||||
|
rxLongitude = regexp.MustCompile(Longitude)
|
||||||
|
rxDNSName = regexp.MustCompile(DNSName)
|
||||||
|
rxURL = regexp.MustCompile(URL)
|
||||||
|
rxSSN = regexp.MustCompile(SSN)
|
||||||
|
rxWinPath = regexp.MustCompile(WinPath)
|
||||||
|
rxUnixPath = regexp.MustCompile(UnixPath)
|
||||||
|
rxSemver = regexp.MustCompile(Semver)
|
||||||
|
)
|
|
@ -0,0 +1,418 @@
|
||||||
|
package govalidator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Validator is a wrapper for a validator function that returns bool and accepts string.
|
||||||
|
type Validator func(str string) bool
|
||||||
|
|
||||||
|
// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type.
|
||||||
|
// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
|
||||||
|
type CustomTypeValidator func(i interface{}, o interface{}) bool
|
||||||
|
|
||||||
|
// ParamValidator is a wrapper for validator functions that accepts additional parameters.
|
||||||
|
type ParamValidator func(str string, params ...string) bool
|
||||||
|
type tagOptionsMap map[string]string
|
||||||
|
|
||||||
|
// UnsupportedTypeError is a wrapper for reflect.Type
|
||||||
|
type UnsupportedTypeError struct {
|
||||||
|
Type reflect.Type
|
||||||
|
}
|
||||||
|
|
||||||
|
// stringValues is a slice of reflect.Value holding *reflect.StringValue.
|
||||||
|
// It implements the methods to sort by string.
|
||||||
|
type stringValues []reflect.Value
|
||||||
|
|
||||||
|
// ParamTagMap is a map of functions accept variants parameters
|
||||||
|
var ParamTagMap = map[string]ParamValidator{
|
||||||
|
"length": ByteLength,
|
||||||
|
"range": Range,
|
||||||
|
"runelength": RuneLength,
|
||||||
|
"stringlength": StringLength,
|
||||||
|
"matches": StringMatches,
|
||||||
|
"in": isInRaw,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParamTagRegexMap maps param tags to their respective regexes.
|
||||||
|
var ParamTagRegexMap = map[string]*regexp.Regexp{
|
||||||
|
"range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
|
||||||
|
"length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
|
||||||
|
"runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
|
||||||
|
"stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
|
||||||
|
"in": regexp.MustCompile(`^in\((.*)\)`),
|
||||||
|
"matches": regexp.MustCompile(`^matches\((.+)\)$`),
|
||||||
|
}
|
||||||
|
|
||||||
|
type customTypeTagMap struct {
|
||||||
|
validators map[string]CustomTypeValidator
|
||||||
|
|
||||||
|
sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
|
||||||
|
tm.RLock()
|
||||||
|
defer tm.RUnlock()
|
||||||
|
v, ok := tm.validators[name]
|
||||||
|
return v, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
|
||||||
|
tm.Lock()
|
||||||
|
defer tm.Unlock()
|
||||||
|
tm.validators[name] = ctv
|
||||||
|
}
|
||||||
|
|
||||||
|
// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
|
||||||
|
// Use this to validate compound or custom types that need to be handled as a whole, e.g.
|
||||||
|
// `type UUID [16]byte` (this would be handled as an array of bytes).
|
||||||
|
var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
|
||||||
|
|
||||||
|
// TagMap is a map of functions, that can be used as tags for ValidateStruct function.
|
||||||
|
var TagMap = map[string]Validator{
|
||||||
|
"email": IsEmail,
|
||||||
|
"url": IsURL,
|
||||||
|
"dialstring": IsDialString,
|
||||||
|
"requrl": IsRequestURL,
|
||||||
|
"requri": IsRequestURI,
|
||||||
|
"alpha": IsAlpha,
|
||||||
|
"utfletter": IsUTFLetter,
|
||||||
|
"alphanum": IsAlphanumeric,
|
||||||
|
"utfletternum": IsUTFLetterNumeric,
|
||||||
|
"numeric": IsNumeric,
|
||||||
|
"utfnumeric": IsUTFNumeric,
|
||||||
|
"utfdigit": IsUTFDigit,
|
||||||
|
"hexadecimal": IsHexadecimal,
|
||||||
|
"hexcolor": IsHexcolor,
|
||||||
|
"rgbcolor": IsRGBcolor,
|
||||||
|
"lowercase": IsLowerCase,
|
||||||
|
"uppercase": IsUpperCase,
|
||||||
|
"int": IsInt,
|
||||||
|
"float": IsFloat,
|
||||||
|
"null": IsNull,
|
||||||
|
"uuid": IsUUID,
|
||||||
|
"uuidv3": IsUUIDv3,
|
||||||
|
"uuidv4": IsUUIDv4,
|
||||||
|
"uuidv5": IsUUIDv5,
|
||||||
|
"creditcard": IsCreditCard,
|
||||||
|
"isbn10": IsISBN10,
|
||||||
|
"isbn13": IsISBN13,
|
||||||
|
"json": IsJSON,
|
||||||
|
"multibyte": IsMultibyte,
|
||||||
|
"ascii": IsASCII,
|
||||||
|
"printableascii": IsPrintableASCII,
|
||||||
|
"fullwidth": IsFullWidth,
|
||||||
|
"halfwidth": IsHalfWidth,
|
||||||
|
"variablewidth": IsVariableWidth,
|
||||||
|
"base64": IsBase64,
|
||||||
|
"datauri": IsDataURI,
|
||||||
|
"ip": IsIP,
|
||||||
|
"port": IsPort,
|
||||||
|
"ipv4": IsIPv4,
|
||||||
|
"ipv6": IsIPv6,
|
||||||
|
"dns": IsDNSName,
|
||||||
|
"host": IsHost,
|
||||||
|
"mac": IsMAC,
|
||||||
|
"latitude": IsLatitude,
|
||||||
|
"longitude": IsLongitude,
|
||||||
|
"ssn": IsSSN,
|
||||||
|
"semver": IsSemver,
|
||||||
|
"rfc3339": IsRFC3339,
|
||||||
|
"ISO3166Alpha2": IsISO3166Alpha2,
|
||||||
|
"ISO3166Alpha3": IsISO3166Alpha3,
|
||||||
|
"ISO4217": IsISO4217,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ISO3166Entry stores country codes
|
||||||
|
type ISO3166Entry struct {
|
||||||
|
EnglishShortName string
|
||||||
|
FrenchShortName string
|
||||||
|
Alpha2Code string
|
||||||
|
Alpha3Code string
|
||||||
|
Numeric string
|
||||||
|
}
|
||||||
|
|
||||||
|
//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
|
||||||
|
var ISO3166List = []ISO3166Entry{
|
||||||
|
{"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
|
||||||
|
{"Albania", "Albanie (l')", "AL", "ALB", "008"},
|
||||||
|
{"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
|
||||||
|
{"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
|
||||||
|
{"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
|
||||||
|
{"Andorra", "Andorre (l')", "AD", "AND", "020"},
|
||||||
|
{"Angola", "Angola (l')", "AO", "AGO", "024"},
|
||||||
|
{"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
|
||||||
|
{"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
|
||||||
|
{"Argentina", "Argentine (l')", "AR", "ARG", "032"},
|
||||||
|
{"Australia", "Australie (l')", "AU", "AUS", "036"},
|
||||||
|
{"Austria", "Autriche (l')", "AT", "AUT", "040"},
|
||||||
|
{"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
|
||||||
|
{"Bahrain", "Bahreïn", "BH", "BHR", "048"},
|
||||||
|
{"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
|
||||||
|
{"Armenia", "Arménie (l')", "AM", "ARM", "051"},
|
||||||
|
{"Barbados", "Barbade (la)", "BB", "BRB", "052"},
|
||||||
|
{"Belgium", "Belgique (la)", "BE", "BEL", "056"},
|
||||||
|
{"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
|
||||||
|
{"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
|
||||||
|
{"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
|
||||||
|
{"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
|
||||||
|
{"Botswana", "Botswana (le)", "BW", "BWA", "072"},
|
||||||
|
{"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
|
||||||
|
{"Brazil", "Brésil (le)", "BR", "BRA", "076"},
|
||||||
|
{"Belize", "Belize (le)", "BZ", "BLZ", "084"},
|
||||||
|
{"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
|
||||||
|
{"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
|
||||||
|
{"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
|
||||||
|
{"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
|
||||||
|
{"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
|
||||||
|
{"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
|
||||||
|
{"Burundi", "Burundi (le)", "BI", "BDI", "108"},
|
||||||
|
{"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
|
||||||
|
{"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
|
||||||
|
{"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
|
||||||
|
{"Canada", "Canada (le)", "CA", "CAN", "124"},
|
||||||
|
{"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
|
||||||
|
{"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
|
||||||
|
{"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
|
||||||
|
{"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
|
||||||
|
{"Chad", "Tchad (le)", "TD", "TCD", "148"},
|
||||||
|
{"Chile", "Chili (le)", "CL", "CHL", "152"},
|
||||||
|
{"China", "Chine (la)", "CN", "CHN", "156"},
|
||||||
|
{"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
|
||||||
|
{"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
|
||||||
|
{"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
|
||||||
|
{"Colombia", "Colombie (la)", "CO", "COL", "170"},
|
||||||
|
{"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
|
||||||
|
{"Mayotte", "Mayotte", "YT", "MYT", "175"},
|
||||||
|
{"Congo (the)", "Congo (le)", "CG", "COG", "178"},
|
||||||
|
{"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
|
||||||
|
{"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
|
||||||
|
{"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
|
||||||
|
{"Croatia", "Croatie (la)", "HR", "HRV", "191"},
|
||||||
|
{"Cuba", "Cuba", "CU", "CUB", "192"},
|
||||||
|
{"Cyprus", "Chypre", "CY", "CYP", "196"},
|
||||||
|
{"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
|
||||||
|
{"Benin", "Bénin (le)", "BJ", "BEN", "204"},
|
||||||
|
{"Denmark", "Danemark (le)", "DK", "DNK", "208"},
|
||||||
|
{"Dominica", "Dominique (la)", "DM", "DMA", "212"},
|
||||||
|
{"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
|
||||||
|
{"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
|
||||||
|
{"El Salvador", "El Salvador", "SV", "SLV", "222"},
|
||||||
|
{"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
|
||||||
|
{"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
|
||||||
|
{"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
|
||||||
|
{"Estonia", "Estonie (l')", "EE", "EST", "233"},
|
||||||
|
{"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
|
||||||
|
{"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
|
||||||
|
{"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
|
||||||
|
{"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
|
||||||
|
{"Finland", "Finlande (la)", "FI", "FIN", "246"},
|
||||||
|
{"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
|
||||||
|
{"France", "France (la)", "FR", "FRA", "250"},
|
||||||
|
{"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
|
||||||
|
{"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
|
||||||
|
{"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
|
||||||
|
{"Djibouti", "Djibouti", "DJ", "DJI", "262"},
|
||||||
|
{"Gabon", "Gabon (le)", "GA", "GAB", "266"},
|
||||||
|
{"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
|
||||||
|
{"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
|
||||||
|
{"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
|
||||||
|
{"Germany", "Allemagne (l')", "DE", "DEU", "276"},
|
||||||
|
{"Ghana", "Ghana (le)", "GH", "GHA", "288"},
|
||||||
|
{"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
|
||||||
|
{"Kiribati", "Kiribati", "KI", "KIR", "296"},
|
||||||
|
{"Greece", "Grèce (la)", "GR", "GRC", "300"},
|
||||||
|
{"Greenland", "Groenland (le)", "GL", "GRL", "304"},
|
||||||
|
{"Grenada", "Grenade (la)", "GD", "GRD", "308"},
|
||||||
|
{"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
|
||||||
|
{"Guam", "Guam", "GU", "GUM", "316"},
|
||||||
|
{"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
|
||||||
|
{"Guinea", "Guinée (la)", "GN", "GIN", "324"},
|
||||||
|
{"Guyana", "Guyana (le)", "GY", "GUY", "328"},
|
||||||
|
{"Haiti", "Haïti", "HT", "HTI", "332"},
|
||||||
|
{"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
|
||||||
|
{"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
|
||||||
|
{"Honduras", "Honduras (le)", "HN", "HND", "340"},
|
||||||
|
{"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
|
||||||
|
{"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
|
||||||
|
{"Iceland", "Islande (l')", "IS", "ISL", "352"},
|
||||||
|
{"India", "Inde (l')", "IN", "IND", "356"},
|
||||||
|
{"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
|
||||||
|
{"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
|
||||||
|
{"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
|
||||||
|
{"Ireland", "Irlande (l')", "IE", "IRL", "372"},
|
||||||
|
{"Israel", "Israël", "IL", "ISR", "376"},
|
||||||
|
{"Italy", "Italie (l')", "IT", "ITA", "380"},
|
||||||
|
{"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
|
||||||
|
{"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
|
||||||
|
{"Japan", "Japon (le)", "JP", "JPN", "392"},
|
||||||
|
{"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
|
||||||
|
{"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
|
||||||
|
{"Kenya", "Kenya (le)", "KE", "KEN", "404"},
|
||||||
|
{"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
|
||||||
|
{"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
|
||||||
|
{"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
|
||||||
|
{"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
|
||||||
|
{"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
|
||||||
|
{"Lebanon", "Liban (le)", "LB", "LBN", "422"},
|
||||||
|
{"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
|
||||||
|
{"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
|
||||||
|
{"Liberia", "Libéria (le)", "LR", "LBR", "430"},
|
||||||
|
{"Libya", "Libye (la)", "LY", "LBY", "434"},
|
||||||
|
{"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
|
||||||
|
{"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
|
||||||
|
{"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
|
||||||
|
{"Macao", "Macao", "MO", "MAC", "446"},
|
||||||
|
{"Madagascar", "Madagascar", "MG", "MDG", "450"},
|
||||||
|
{"Malawi", "Malawi (le)", "MW", "MWI", "454"},
|
||||||
|
{"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
|
||||||
|
{"Maldives", "Maldives (les)", "MV", "MDV", "462"},
|
||||||
|
{"Mali", "Mali (le)", "ML", "MLI", "466"},
|
||||||
|
{"Malta", "Malte", "MT", "MLT", "470"},
|
||||||
|
{"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
|
||||||
|
{"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
|
||||||
|
{"Mauritius", "Maurice", "MU", "MUS", "480"},
|
||||||
|
{"Mexico", "Mexique (le)", "MX", "MEX", "484"},
|
||||||
|
{"Monaco", "Monaco", "MC", "MCO", "492"},
|
||||||
|
{"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
|
||||||
|
{"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
|
||||||
|
{"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
|
||||||
|
{"Montserrat", "Montserrat", "MS", "MSR", "500"},
|
||||||
|
{"Morocco", "Maroc (le)", "MA", "MAR", "504"},
|
||||||
|
{"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
|
||||||
|
{"Oman", "Oman", "OM", "OMN", "512"},
|
||||||
|
{"Namibia", "Namibie (la)", "NA", "NAM", "516"},
|
||||||
|
{"Nauru", "Nauru", "NR", "NRU", "520"},
|
||||||
|
{"Nepal", "Népal (le)", "NP", "NPL", "524"},
|
||||||
|
{"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
|
||||||
|
{"Curaçao", "Curaçao", "CW", "CUW", "531"},
|
||||||
|
{"Aruba", "Aruba", "AW", "ABW", "533"},
|
||||||
|
{"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
|
||||||
|
{"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
|
||||||
|
{"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
|
||||||
|
{"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
|
||||||
|
{"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
|
||||||
|
{"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
|
||||||
|
{"Niger (the)", "Niger (le)", "NE", "NER", "562"},
|
||||||
|
{"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
|
||||||
|
{"Niue", "Niue", "NU", "NIU", "570"},
|
||||||
|
{"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
|
||||||
|
{"Norway", "Norvège (la)", "NO", "NOR", "578"},
|
||||||
|
{"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
|
||||||
|
{"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
|
||||||
|
{"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
|
||||||
|
{"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
|
||||||
|
{"Palau", "Palaos (les)", "PW", "PLW", "585"},
|
||||||
|
{"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
|
||||||
|
{"Panama", "Panama (le)", "PA", "PAN", "591"},
|
||||||
|
{"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
|
||||||
|
{"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
|
||||||
|
{"Peru", "Pérou (le)", "PE", "PER", "604"},
|
||||||
|
{"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
|
||||||
|
{"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
|
||||||
|
{"Poland", "Pologne (la)", "PL", "POL", "616"},
|
||||||
|
{"Portugal", "Portugal (le)", "PT", "PRT", "620"},
|
||||||
|
{"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
|
||||||
|
{"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
|
||||||
|
{"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
|
||||||
|
{"Qatar", "Qatar (le)", "QA", "QAT", "634"},
|
||||||
|
{"Réunion", "Réunion (La)", "RE", "REU", "638"},
|
||||||
|
{"Romania", "Roumanie (la)", "RO", "ROU", "642"},
|
||||||
|
{"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
|
||||||
|
{"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
|
||||||
|
{"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
|
||||||
|
{"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
|
||||||
|
{"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
|
||||||
|
{"Anguilla", "Anguilla", "AI", "AIA", "660"},
|
||||||
|
{"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
|
||||||
|
{"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
|
||||||
|
{"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
|
||||||
|
{"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
|
||||||
|
{"San Marino", "Saint-Marin", "SM", "SMR", "674"},
|
||||||
|
{"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
|
||||||
|
{"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
|
||||||
|
{"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
|
||||||
|
{"Serbia", "Serbie (la)", "RS", "SRB", "688"},
|
||||||
|
{"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
|
||||||
|
{"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
|
||||||
|
{"Singapore", "Singapour", "SG", "SGP", "702"},
|
||||||
|
{"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
|
||||||
|
{"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
|
||||||
|
{"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
|
||||||
|
{"Somalia", "Somalie (la)", "SO", "SOM", "706"},
|
||||||
|
{"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
|
||||||
|
{"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
|
||||||
|
{"Spain", "Espagne (l')", "ES", "ESP", "724"},
|
||||||
|
{"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
|
||||||
|
{"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
|
||||||
|
{"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
|
||||||
|
{"Suriname", "Suriname (le)", "SR", "SUR", "740"},
|
||||||
|
{"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
|
||||||
|
{"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
|
||||||
|
{"Sweden", "Suède (la)", "SE", "SWE", "752"},
|
||||||
|
{"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
|
||||||
|
{"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
|
||||||
|
{"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
|
||||||
|
{"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
|
||||||
|
{"Togo", "Togo (le)", "TG", "TGO", "768"},
|
||||||
|
{"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
|
||||||
|
{"Tonga", "Tonga (les)", "TO", "TON", "776"},
|
||||||
|
{"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
|
||||||
|
{"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
|
||||||
|
{"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
|
||||||
|
{"Turkey", "Turquie (la)", "TR", "TUR", "792"},
|
||||||
|
{"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
|
||||||
|
{"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
|
||||||
|
{"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
|
||||||
|
{"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
|
||||||
|
{"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
|
||||||
|
{"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
|
||||||
|
{"Egypt", "Égypte (l')", "EG", "EGY", "818"},
|
||||||
|
{"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
|
||||||
|
{"Guernsey", "Guernesey", "GG", "GGY", "831"},
|
||||||
|
{"Jersey", "Jersey", "JE", "JEY", "832"},
|
||||||
|
{"Isle of Man", "Île de Man", "IM", "IMN", "833"},
|
||||||
|
{"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
|
||||||
|
{"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
|
||||||
|
{"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
|
||||||
|
{"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
|
||||||
|
{"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
|
||||||
|
{"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
|
||||||
|
{"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
|
||||||
|
{"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
|
||||||
|
{"Samoa", "Samoa (le)", "WS", "WSM", "882"},
|
||||||
|
{"Yemen", "Yémen (le)", "YE", "YEM", "887"},
|
||||||
|
{"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// ISO4217List is the list of ISO currency codes
|
||||||
|
var ISO4217List = []string{
|
||||||
|
"AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
|
||||||
|
"BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD",
|
||||||
|
"CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK",
|
||||||
|
"DJF", "DKK", "DOP", "DZD",
|
||||||
|
"EGP", "ERN", "ETB", "EUR",
|
||||||
|
"FJD", "FKP",
|
||||||
|
"GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD",
|
||||||
|
"HKD", "HNL", "HRK", "HTG", "HUF",
|
||||||
|
"IDR", "ILS", "INR", "IQD", "IRR", "ISK",
|
||||||
|
"JMD", "JOD", "JPY",
|
||||||
|
"KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT",
|
||||||
|
"LAK", "LBP", "LKR", "LRD", "LSL", "LYD",
|
||||||
|
"MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN",
|
||||||
|
"NAD", "NGN", "NIO", "NOK", "NPR", "NZD",
|
||||||
|
"OMR",
|
||||||
|
"PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG",
|
||||||
|
"QAR",
|
||||||
|
"RON", "RSD", "RUB", "RWF",
|
||||||
|
"SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL",
|
||||||
|
"THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS",
|
||||||
|
"UAH", "UGX", "USD", "USN", "UYI", "UYU", "UZS",
|
||||||
|
"VEF", "VND", "VUV",
|
||||||
|
"WST",
|
||||||
|
"XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX",
|
||||||
|
"YER",
|
||||||
|
"ZAR", "ZMW", "ZWL",
|
||||||
|
}
|
|
@ -0,0 +1,262 @@
|
||||||
|
package govalidator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"html"
|
||||||
|
"math"
|
||||||
|
"path"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Contains check if the string contains the substring.
|
||||||
|
func Contains(str, substring string) bool {
|
||||||
|
return strings.Contains(str, substring)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Matches check if string matches the pattern (pattern is regular expression)
|
||||||
|
// In case of error return false
|
||||||
|
func Matches(str, pattern string) bool {
|
||||||
|
match, _ := regexp.MatchString(pattern, str)
|
||||||
|
return match
|
||||||
|
}
|
||||||
|
|
||||||
|
// LeftTrim trim characters from the left-side of the input.
|
||||||
|
// If second argument is empty, it's will be remove leading spaces.
|
||||||
|
func LeftTrim(str, chars string) string {
|
||||||
|
if chars == "" {
|
||||||
|
return strings.TrimLeftFunc(str, unicode.IsSpace)
|
||||||
|
}
|
||||||
|
r, _ := regexp.Compile("^[" + chars + "]+")
|
||||||
|
return r.ReplaceAllString(str, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// RightTrim trim characters from the right-side of the input.
|
||||||
|
// If second argument is empty, it's will be remove spaces.
|
||||||
|
func RightTrim(str, chars string) string {
|
||||||
|
if chars == "" {
|
||||||
|
return strings.TrimRightFunc(str, unicode.IsSpace)
|
||||||
|
}
|
||||||
|
r, _ := regexp.Compile("[" + chars + "]+$")
|
||||||
|
return r.ReplaceAllString(str, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Trim trim characters from both sides of the input.
|
||||||
|
// If second argument is empty, it's will be remove spaces.
|
||||||
|
func Trim(str, chars string) string {
|
||||||
|
return LeftTrim(RightTrim(str, chars), chars)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WhiteList remove characters that do not appear in the whitelist.
|
||||||
|
func WhiteList(str, chars string) string {
|
||||||
|
pattern := "[^" + chars + "]+"
|
||||||
|
r, _ := regexp.Compile(pattern)
|
||||||
|
return r.ReplaceAllString(str, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlackList remove characters that appear in the blacklist.
|
||||||
|
func BlackList(str, chars string) string {
|
||||||
|
pattern := "[" + chars + "]+"
|
||||||
|
r, _ := regexp.Compile(pattern)
|
||||||
|
return r.ReplaceAllString(str, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// StripLow remove characters with a numerical value < 32 and 127, mostly control characters.
|
||||||
|
// If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
|
||||||
|
func StripLow(str string, keepNewLines bool) string {
|
||||||
|
chars := ""
|
||||||
|
if keepNewLines {
|
||||||
|
chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
|
||||||
|
} else {
|
||||||
|
chars = "\x00-\x1F\x7F"
|
||||||
|
}
|
||||||
|
return BlackList(str, chars)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReplacePattern replace regular expression pattern in string
|
||||||
|
func ReplacePattern(str, pattern, replace string) string {
|
||||||
|
r, _ := regexp.Compile(pattern)
|
||||||
|
return r.ReplaceAllString(str, replace)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Escape replace <, >, & and " with HTML entities.
|
||||||
|
var Escape = html.EscapeString
|
||||||
|
|
||||||
|
func addSegment(inrune, segment []rune) []rune {
|
||||||
|
if len(segment) == 0 {
|
||||||
|
return inrune
|
||||||
|
}
|
||||||
|
if len(inrune) != 0 {
|
||||||
|
inrune = append(inrune, '_')
|
||||||
|
}
|
||||||
|
inrune = append(inrune, segment...)
|
||||||
|
return inrune
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnderscoreToCamelCase converts from underscore separated form to camel case form.
|
||||||
|
// Ex.: my_func => MyFunc
|
||||||
|
func UnderscoreToCamelCase(s string) string {
|
||||||
|
return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CamelCaseToUnderscore converts from camel case form to underscore separated form.
|
||||||
|
// Ex.: MyFunc => my_func
|
||||||
|
func CamelCaseToUnderscore(str string) string {
|
||||||
|
var output []rune
|
||||||
|
var segment []rune
|
||||||
|
for _, r := range str {
|
||||||
|
if !unicode.IsLower(r) {
|
||||||
|
output = addSegment(output, segment)
|
||||||
|
segment = nil
|
||||||
|
}
|
||||||
|
segment = append(segment, unicode.ToLower(r))
|
||||||
|
}
|
||||||
|
output = addSegment(output, segment)
|
||||||
|
return string(output)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reverse return reversed string
|
||||||
|
func Reverse(s string) string {
|
||||||
|
r := []rune(s)
|
||||||
|
for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
|
||||||
|
r[i], r[j] = r[j], r[i]
|
||||||
|
}
|
||||||
|
return string(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetLines split string by "\n" and return array of lines
|
||||||
|
func GetLines(s string) []string {
|
||||||
|
return strings.Split(s, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetLine return specified line of multiline string
|
||||||
|
func GetLine(s string, index int) (string, error) {
|
||||||
|
lines := GetLines(s)
|
||||||
|
if index < 0 || index >= len(lines) {
|
||||||
|
return "", errors.New("line index out of bounds")
|
||||||
|
}
|
||||||
|
return lines[index], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveTags remove all tags from HTML string
|
||||||
|
func RemoveTags(s string) string {
|
||||||
|
return ReplacePattern(s, "<[^>]*>", "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// safeFileNameSeparators and safeFileNameIllegal are compiled once at
// package init. The previous code recompiled both patterns on every call
// and silently skipped the rewrite on (impossible) compile errors.
var (
	safeFileNameSeparators = regexp.MustCompile(`[ &_=+:]`)
	safeFileNameIllegal    = regexp.MustCompile(`[^[:alnum:]-.]`)
)

// SafeFileName return safe string that can be used in file names:
// the input is lowercased, reduced to its base path element, trimmed of
// spaces, separators ([ &_=+:]) become "-", characters other than
// alphanumerics, "-" and "." are removed, and runs of "-" are collapsed.
func SafeFileName(str string) string {
	name := strings.ToLower(str)
	name = path.Clean(path.Base(name))
	name = strings.Trim(name, " ")
	name = safeFileNameSeparators.ReplaceAllString(name, "-")
	name = safeFileNameIllegal.ReplaceAllString(name, "")
	// collapse any "--" runs produced by adjacent separators
	for strings.Contains(name, "--") {
		name = strings.Replace(name, "--", "-", -1)
	}
	return name
}
|
||||||
|
|
||||||
|
// NormalizeEmail canonicalize an email address.
|
||||||
|
// The local part of the email address is lowercased for all domains; the hostname is always lowercased and
|
||||||
|
// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail).
|
||||||
|
// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and
|
||||||
|
// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are
|
||||||
|
// normalized to @gmail.com.
|
||||||
|
func NormalizeEmail(str string) (string, error) {
|
||||||
|
if !IsEmail(str) {
|
||||||
|
return "", fmt.Errorf("%s is not an email", str)
|
||||||
|
}
|
||||||
|
parts := strings.Split(str, "@")
|
||||||
|
parts[0] = strings.ToLower(parts[0])
|
||||||
|
parts[1] = strings.ToLower(parts[1])
|
||||||
|
if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
|
||||||
|
parts[1] = "gmail.com"
|
||||||
|
parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
|
||||||
|
}
|
||||||
|
return strings.Join(parts, "@"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Truncate a string to the closest length without breaking words.
// The string is cut at the word boundary nearest to length (just before or
// just after it, whichever is closer), trimmed of surrounding punctuation
// via Trim, and ending is appended. Strings not longer than length
// (measured in bytes, not runes) are returned unchanged.
func Truncate(str string, length int, ending string) string {
	var aftstr, befstr string
	if len(str) > length {
		words := strings.Fields(str)
		// before/present hold the cumulative length just before and just
		// after appending the current word
		before, present := 0, 0
		for i := range words {
			befstr = aftstr
			before = present
			aftstr = aftstr + words[i] + " "
			present = len(aftstr)
			if present > length && i != 0 {
				// choose whichever cut lands closer to the requested length
				if (length - before) < (present - length) {
					return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
				}
				return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
			}
		}
	}

	return str
}
|
||||||
|
|
||||||
|
// PadLeft pad left side of string if size of string is less than indicated pad length.
// Delegates to buildPadStr with padLeft=true, padRight=false.
func PadLeft(str string, padStr string, padLen int) string {
	return buildPadStr(str, padStr, padLen, true, false)
}
|
||||||
|
|
||||||
|
// PadRight pad right side of string if size of string is less than indicated pad length.
// Delegates to buildPadStr with padLeft=false, padRight=true.
func PadRight(str string, padStr string, padLen int) string {
	return buildPadStr(str, padStr, padLen, false, true)
}
|
||||||
|
|
||||||
|
// PadBoth pad sides of string if size of string is less than indicated pad length.
// Delegates to buildPadStr with padLeft=true, padRight=true.
func PadBoth(str string, padStr string, padLen int) string {
	return buildPadStr(str, padStr, padLen, true, true)
}
|
||||||
|
|
||||||
|
// buildPadStr pads str on the left, right or both sides up to padLen runes.
// Note the padding string can be unicode and more than one character.
func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
	strLen := utf8.RuneCountInString(str)
	// Nothing to do when the string already meets the requested length, or
	// when there is no padding material (the previous code divided by zero
	// for an empty padStr).
	if padStr == "" || padLen <= strLen {
		return str
	}

	// number of padding runes still needed
	padLen -= strLen

	// Split the padding between the two sides; the right side receives the
	// extra rune when padLen is odd.
	targetLenLeft, targetLenRight := padLen, padLen
	if padLeft && padRight {
		targetLenLeft = padLen / 2
		targetLenRight = padLen - targetLenLeft
	}

	// Build enough padding material, then cut it by RUNES. The previous
	// byte-based slicing (repeatedString[0:n]) could split a multi-byte
	// rune in half when padStr contained non-ASCII characters.
	padRuneLen := utf8.RuneCountInString(padStr)
	repeatTimes := (padLen + padRuneLen - 1) / padRuneLen // integer ceil
	padRunes := []rune(strings.Repeat(padStr, repeatTimes))

	leftSide := ""
	if padLeft {
		leftSide = string(padRunes[:targetLenLeft])
	}
	rightSide := ""
	if padRight {
		rightSide = string(padRunes[:targetLenRight])
	}
	return leftSide + str + rightSide
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,15 @@
|
||||||
|
box: golang
|
||||||
|
build:
|
||||||
|
steps:
|
||||||
|
- setup-go-workspace
|
||||||
|
|
||||||
|
- script:
|
||||||
|
name: go get
|
||||||
|
code: |
|
||||||
|
go version
|
||||||
|
go get -t ./...
|
||||||
|
|
||||||
|
- script:
|
||||||
|
name: go test
|
||||||
|
code: |
|
||||||
|
go test -race ./...
|
|
@ -0,0 +1,46 @@
|
||||||
|
Change history of swagger
|
||||||
|
=
|
||||||
|
2017-01-30
|
||||||
|
- moved from go-restful/swagger to go-restful-swagger12
|
||||||
|
|
||||||
|
2015-10-16
|
||||||
|
- add type override mechanism for swagger models (MR 254, nathanejohnson)
|
||||||
|
- replace uses of wildcard in generated apidocs (issue 251)
|
||||||
|
|
||||||
|
2015-05-25
|
||||||
|
- (api break) changed the type of Properties in Model
|
||||||
|
- (api break) changed the type of Models in ApiDeclaration
|
||||||
|
- (api break) changed the parameter type of PostBuildDeclarationMapFunc
|
||||||
|
|
||||||
|
2015-04-09
|
||||||
|
- add ModelBuildable interface for customization of Model
|
||||||
|
|
||||||
|
2015-03-17
|
||||||
|
- preserve order of Routes per WebService in Swagger listing
|
||||||
|
- fix use of $ref and type in Swagger models
|
||||||
|
- add api version to listing
|
||||||
|
|
||||||
|
2014-11-14
|
||||||
|
- operation parameters are now sorted using ordering path,query,form,header,body
|
||||||
|
|
||||||
|
2014-11-12
|
||||||
|
- respect omitempty tag value for embedded structs
|
||||||
|
- expose ApiVersion of WebService to Swagger ApiDeclaration
|
||||||
|
|
||||||
|
2014-05-29
|
||||||
|
- (api add) Ability to define custom http.Handler to serve swagger-ui static files
|
||||||
|
|
||||||
|
2014-05-04
|
||||||
|
- (fix) include model for array element type of response
|
||||||
|
|
||||||
|
2014-01-03
|
||||||
|
- (fix) do not add primitive type to the Api models
|
||||||
|
|
||||||
|
2013-11-27
|
||||||
|
- (fix) make Swagger work for WebServices with root ("/" or "") paths
|
||||||
|
|
||||||
|
2013-10-29
|
||||||
|
- (api add) package variable LogInfo to customize logging function
|
||||||
|
|
||||||
|
2013-10-15
|
||||||
|
- upgraded to spec version 1.2 (https://github.com/wordnik/swagger-core/wiki/1.2-transition)
|
|
@ -0,0 +1,22 @@
|
||||||
|
Copyright (c) 2017 Ernest Micklei
|
||||||
|
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
a copy of this software and associated documentation files (the
|
||||||
|
"Software"), to deal in the Software without restriction, including
|
||||||
|
without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be
|
||||||
|
included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||||
|
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||||
|
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||||
|
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,83 @@
|
||||||
|
# go-restful-swagger12
|
||||||
|
|
||||||
|
[![Build Status](https://travis-ci.org/emicklei/go-restful-swagger12.png)](https://travis-ci.org/emicklei/go-restful-swagger12)
|
||||||
|
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful-swagger12?status.svg)](https://godoc.org/github.com/emicklei/go-restful-swagger12)
|
||||||
|
|
||||||
|
How to use Swagger UI with go-restful
|
||||||
|
=
|
||||||
|
|
||||||
|
Get the Swagger UI sources (version 1.2 only)
|
||||||
|
|
||||||
|
git clone https://github.com/wordnik/swagger-ui.git
|
||||||
|
|
||||||
|
The project contains a "dist" folder.
|
||||||
|
Its contents has all the Swagger UI files you need.
|
||||||
|
|
||||||
|
The `index.html` has an `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`.
|
||||||
|
You need to change that to match your WebService JSON endpoint e.g. `http://localhost:8080/apidocs.json`
|
||||||
|
|
||||||
|
Now, you can install the Swagger WebService for serving the Swagger specification in JSON.
|
||||||
|
|
||||||
|
config := swagger.Config{
|
||||||
|
WebServices: restful.RegisteredWebServices(),
|
||||||
|
ApiPath: "/apidocs.json",
|
||||||
|
SwaggerPath: "/apidocs/",
|
||||||
|
SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
|
||||||
|
swagger.InstallSwaggerService(config)
|
||||||
|
|
||||||
|
|
||||||
|
Documenting Structs
|
||||||
|
--
|
||||||
|
|
||||||
|
Currently there are 2 ways to document your structs in the go-restful Swagger.
|
||||||
|
|
||||||
|
###### By using struct tags
|
||||||
|
- Use tag "description" to annotate a struct field with a description to show in the UI
|
||||||
|
- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added to a field of the struct; when multiple definitions exist, they are appended separated by an empty line.
|
||||||
|
|
||||||
|
###### By using the SwaggerDoc method
|
||||||
|
Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**.
|
||||||
|
|
||||||
|
type Address struct {
|
||||||
|
Country string `json:"country,omitempty"`
|
||||||
|
PostCode int `json:"postcode,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (Address) SwaggerDoc() map[string]string {
|
||||||
|
return map[string]string{
|
||||||
|
"": "Address doc",
|
||||||
|
"country": "Country doc",
|
||||||
|
"postcode": "PostCode doc",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
This example will generate a JSON like this
|
||||||
|
|
||||||
|
{
|
||||||
|
"Address": {
|
||||||
|
"id": "Address",
|
||||||
|
"description": "Address doc",
|
||||||
|
"properties": {
|
||||||
|
"country": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Country doc"
|
||||||
|
},
|
||||||
|
"postcode": {
|
||||||
|
"type": "integer",
|
||||||
|
"format": "int32",
|
||||||
|
"description": "PostCode doc"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
**Very Important Notes:**
|
||||||
|
- `SwaggerDoc()` is using a **NON-Pointer** receiver (e.g. func (Address) and not func (*Address))
|
||||||
|
- The returned map should use as key the name of the field as defined in the JSON parameter (e.g. `"postcode"` and not `"PostCode"`)
|
||||||
|
|
||||||
|
Notes
|
||||||
|
--
|
||||||
|
- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..)
|
||||||
|
- The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints.
|
||||||
|
|
||||||
|
© 2017, ernestmicklei.com. MIT License. Contributions welcome.
|
64
vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go
generated
vendored
Normal file
64
vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go
generated
vendored
Normal file
|
@ -0,0 +1,64 @@
|
||||||
|
package swagger

// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"bytes"
	"encoding/json"
)

// ApiDeclarationList maintains an ordered list of ApiDeclaration.
type ApiDeclarationList struct {
	List []ApiDeclaration
}

// At returns the ApiDeclaration by its path unless absent, then ok is false
func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) {
	// linear scan preserves the list's insertion order semantics
	for _, each := range l.List {
		if each.ResourcePath == path {
			return each, true
		}
	}
	return a, false
}

// Put adds or replaces a ApiDeclaration with this name
func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
	// maybe replace existing
	for i, each := range l.List {
		if each.ResourcePath == path {
			// replace
			l.List[i] = a
			return
		}
	}
	// add
	l.List = append(l.List, a)
}

// Do enumerates all the properties, each with its assigned name
func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
	for _, each := range l.List {
		block(each.ResourcePath, each)
	}
}

// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty.
// NOTE(review): errors from encoder.Encode are ignored here, and ResourcePath
// is written as a JSON key without escaping — confirm paths can never contain
// quotes or backslashes.
func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	buf.WriteString("{\n")
	for i, each := range l.List {
		buf.WriteString("\"")
		buf.WriteString(each.ResourcePath)
		buf.WriteString("\": ")
		encoder.Encode(each)
		if i < len(l.List)-1 {
			buf.WriteString(",\n")
		}
	}
	buf.WriteString("}")
	return buf.Bytes(), nil
}
|
|
@ -0,0 +1,46 @@
|
||||||
|
package swagger

import (
	"net/http"
	"reflect"

	"github.com/emicklei/go-restful"
)

// PostBuildDeclarationMapFunc can be used to modify the api declaration map.
type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList)

// MapSchemaFormatFunc can be used to modify typeName at definition time.
type MapSchemaFormatFunc func(typeName string) string

// MapModelTypeNameFunc can be used to return the desired typeName for a given
// type. It will return false if the default name should be used.
type MapModelTypeNameFunc func(t reflect.Type) (string, bool)

// Config collects the settings for building and serving the Swagger API
// listing and (optionally) the Swagger UI.
type Config struct {
	// url where the services are available, e.g. http://localhost:8080
	// if left empty then the basePath of Swagger is taken from the actual request
	WebServicesUrl string
	// path where the JSON api is available, e.g. /apidocs
	ApiPath string
	// [optional] path where the swagger UI will be served, e.g. /swagger
	SwaggerPath string
	// [optional] location of folder containing Swagger HTML5 application index.html
	SwaggerFilePath string
	// api listing is constructed from this list of restful WebServices.
	WebServices []*restful.WebService
	// will serve all static content (scripts,pages,images)
	StaticHandler http.Handler
	// [optional] on default CORS (Cross-Origin-Resource-Sharing) is enabled.
	DisableCORS bool
	// Top-level API version. Is reflected in the resource listing.
	ApiVersion string
	// If set then call this handler after building the complete ApiDeclaration Map
	PostBuildHandler PostBuildDeclarationMapFunc
	// Swagger global info struct
	Info Info
	// [optional] If set, model builder should call this handler to get addition typename-to-swagger-format-field conversion.
	SchemaFormatHandler MapSchemaFormatFunc
	// [optional] If set, model builder should call this handler to retrieve the name for a given type.
	ModelTypeNameHandler MapModelTypeNameFunc
}
|
|
@ -0,0 +1,467 @@
|
||||||
|
package swagger

import (
	"encoding/json"
	"reflect"
	"strings"
)

// ModelBuildable is used for extending Structs that need more control over
// how the Model appears in the Swagger api declaration.
type ModelBuildable interface {
	PostBuildModel(m *Model) *Model
}

// modelBuilder walks Go types via reflection and registers the
// corresponding swagger Models into Models, honoring the optional
// Config hooks (SchemaFormatHandler, ModelTypeNameHandler).
type modelBuilder struct {
	Models *ModelList
	Config *Config
}

// documentable is implemented by types that supply their own field docs.
type documentable interface {
	SwaggerDoc() map[string]string
}

// Check if this structure has a method with signature func (<theModel>) SwaggerDoc() map[string]string
// If it exists, retrieve the documentation and overwrite all struct tag descriptions
func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string {
	if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok {
		return docable.SwaggerDoc()
	}
	return make(map[string]string)
}

// addModelFrom creates and adds a Model to the builder and detects and calls
// the post build hook for customizations
func (b modelBuilder) addModelFrom(sample interface{}) {
	if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil {
		// allow customizations
		if buildable, ok := sample.(ModelBuildable); ok {
			modelOrNil = buildable.PostBuildModel(modelOrNil)
			b.Models.Put(modelOrNil.Id, *modelOrNil)
		}
	}
}
|
||||||
|
|
||||||
|
// addModel registers a Model for st (using nameOverride as the model key if
// non-empty) and returns it, or nil when no model is needed (primitives,
// []byte, already-visited types). The model is Put into b.Models BEFORE its
// fields are walked so recursive struct types terminate.
func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
	// Turn pointers into simpler types so further checks are
	// correct.
	if st.Kind() == reflect.Ptr {
		st = st.Elem()
	}

	modelName := b.keyFrom(st)
	if nameOverride != "" {
		modelName = nameOverride
	}
	// no models needed for primitive types
	if b.isPrimitiveType(modelName) {
		return nil
	}
	// golang encoding/json packages says array and slice values encode as
	// JSON arrays, except that []byte encodes as a base64-encoded string.
	// If we see a []byte here, treat it at as a primitive type (string)
	// and deal with it in buildArrayTypeProperty.
	if (st.Kind() == reflect.Slice || st.Kind() == reflect.Array) &&
		st.Elem().Kind() == reflect.Uint8 {
		return nil
	}
	// see if we already have visited this model
	if _, ok := b.Models.At(modelName); ok {
		return nil
	}
	sm := Model{
		Id:         modelName,
		Required:   []string{},
		Properties: ModelPropertyList{}}

	// reference the model before further initializing (enables recursive structs)
	b.Models.Put(modelName, sm)

	// check for slice or array
	if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
		b.addModel(st.Elem(), "")
		return &sm
	}
	// check for structure or primitive type
	if st.Kind() != reflect.Struct {
		return &sm
	}

	fullDoc := getDocFromMethodSwaggerDoc2(st)
	modelDescriptions := []string{}

	// walk every struct field and build its property
	for i := 0; i < st.NumField(); i++ {
		field := st.Field(i)
		jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName)
		if len(modelDescription) > 0 {
			modelDescriptions = append(modelDescriptions, modelDescription)
		}

		// add if not omitted
		if len(jsonName) != 0 {
			// update description
			if fieldDoc, ok := fullDoc[jsonName]; ok {
				prop.Description = fieldDoc
			}
			// update Required
			if b.isPropertyRequired(field) {
				sm.Required = append(sm.Required, jsonName)
			}
			sm.Properties.Put(jsonName, prop)
		}
	}

	// We always overwrite documentation if SwaggerDoc method exists
	// "" is special for documenting the struct itself
	if modelDoc, ok := fullDoc[""]; ok {
		sm.Description = modelDoc
	} else if len(modelDescriptions) != 0 {
		sm.Description = strings.Join(modelDescriptions, "\n")
	}

	// update model builder with completed model
	b.Models.Put(modelName, sm)

	return &sm
}
|
||||||
|
|
||||||
|
func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool {
|
||||||
|
required := true
|
||||||
|
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
|
||||||
|
s := strings.Split(jsonTag, ",")
|
||||||
|
if len(s) > 1 && s[1] == "omitempty" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return required
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildProperty derives the swagger property for a single struct field.
// It returns the field's JSON name (empty means: skip this property), an
// optional model description taken from the "modelDescription" tag, and the
// property itself. Struct/slice/pointer fields dispatch to the dedicated
// buildXxxTypeProperty helpers.
func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) {
	jsonName = b.jsonNameOfField(field)
	if len(jsonName) == 0 {
		// empty name signals skip property
		return "", "", prop
	}

	if field.Name == "XMLName" && field.Type.String() == "xml.Name" {
		// property is metadata for the xml.Name attribute, can be skipped
		return "", "", prop
	}

	if tag := field.Tag.Get("modelDescription"); tag != "" {
		modelDescription = tag
	}

	prop.setPropertyMetadata(field)
	if prop.Type != nil {
		// the struct tag already forced a type; nothing more to derive
		return jsonName, modelDescription, prop
	}
	fieldType := field.Type

	// check if type is doing its own marshalling
	marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem()
	if fieldType.Implements(marshalerType) {
		var pType = "string"
		if prop.Type == nil {
			prop.Type = &pType
		}
		if prop.Format == "" {
			prop.Format = b.jsonSchemaFormat(b.keyFrom(fieldType))
		}
		return jsonName, modelDescription, prop
	}

	// check if annotation says it is a string
	if jsonTag := field.Tag.Get("json"); jsonTag != "" {
		s := strings.Split(jsonTag, ",")
		if len(s) > 1 && s[1] == "string" {
			stringt := "string"
			prop.Type = &stringt
			return jsonName, modelDescription, prop
		}
	}

	fieldKind := fieldType.Kind()
	switch {
	case fieldKind == reflect.Struct:
		jsonName, prop := b.buildStructTypeProperty(field, jsonName, model)
		return jsonName, modelDescription, prop
	case fieldKind == reflect.Slice || fieldKind == reflect.Array:
		jsonName, prop := b.buildArrayTypeProperty(field, jsonName, modelName)
		return jsonName, modelDescription, prop
	case fieldKind == reflect.Ptr:
		jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName)
		return jsonName, modelDescription, prop
	case fieldKind == reflect.String:
		stringt := "string"
		prop.Type = &stringt
		return jsonName, modelDescription, prop
	case fieldKind == reflect.Map:
		// if it's a map, it's unstructured, and swagger 1.2 can't handle it
		objectType := "object"
		prop.Type = &objectType
		return jsonName, modelDescription, prop
	}

	fieldTypeName := b.keyFrom(fieldType)
	if b.isPrimitiveType(fieldTypeName) {
		mapped := b.jsonSchemaType(fieldTypeName)
		prop.Type = &mapped
		prop.Format = b.jsonSchemaFormat(fieldTypeName)
		return jsonName, modelDescription, prop
	}
	modelType := b.keyFrom(fieldType)
	prop.Ref = &modelType

	if fieldType.Name() == "" { // override type of anonymous structs
		nestedTypeName := modelName + "." + jsonName
		prop.Ref = &nestedTypeName
		b.addModel(fieldType, nestedTypeName)
	}
	return jsonName, modelDescription, prop
}
|
||||||
|
|
||||||
|
func hasNamedJSONTag(field reflect.StructField) bool {
|
||||||
|
parts := strings.Split(field.Tag.Get("json"), ",")
|
||||||
|
if len(parts) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, s := range parts[1:] {
|
||||||
|
if s == "inline" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return len(parts[0]) > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildStructTypeProperty creates the property for a struct-typed field,
// handling anonymous structs (named model.Id + "." + jsonName) and embedded
// structs (whose properties are merged into the enclosing model). An empty
// nameJson return value tells the caller to skip the property.
func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) {
	prop.setPropertyMetadata(field)
	// Check for type override in tag
	if prop.Type != nil {
		return jsonName, prop
	}
	fieldType := field.Type
	// check for anonymous
	if len(fieldType.Name()) == 0 {
		// anonymous
		anonType := model.Id + "." + jsonName
		b.addModel(fieldType, anonType)
		prop.Ref = &anonType
		return jsonName, prop
	}

	if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) {
		// embedded struct: build it into a scratch builder, then merge
		sub := modelBuilder{new(ModelList), b.Config}
		sub.addModel(fieldType, "")
		subKey := sub.keyFrom(fieldType)
		// merge properties from sub
		subModel, _ := sub.Models.At(subKey)
		subModel.Properties.Do(func(k string, v ModelProperty) {
			model.Properties.Put(k, v)
			// if subModel says this property is required then include it
			required := false
			for _, each := range subModel.Required {
				if k == each {
					required = true
					break
				}
			}
			if required {
				model.Required = append(model.Required, k)
			}
		})
		// add all new referenced models
		sub.Models.Do(func(key string, sub Model) {
			if key != subKey {
				if _, ok := b.Models.At(key); !ok {
					b.Models.Put(key, sub)
				}
			}
		})
		// empty name signals skip property
		return "", prop
	}
	// simple struct
	b.addModel(fieldType, "")
	var pType = b.keyFrom(fieldType)
	prop.Ref = &pType
	return jsonName, prop
}
|
||||||
|
|
||||||
|
// buildArrayTypeProperty creates the property for a slice/array field.
// []byte maps to a plain "string" (matching encoding/json's base64
// encoding); other element types produce an "array" with Items set to a
// primitive schema type or a model reference.
func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
	// check for type override in tags
	prop.setPropertyMetadata(field)
	if prop.Type != nil {
		return jsonName, prop
	}
	fieldType := field.Type
	if fieldType.Elem().Kind() == reflect.Uint8 {
		stringt := "string"
		prop.Type = &stringt
		return jsonName, prop
	}
	var pType = "array"
	prop.Type = &pType
	isPrimitive := b.isPrimitiveType(fieldType.Elem().Name())
	elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem())
	prop.Items = new(Item)
	if isPrimitive {
		mapped := b.jsonSchemaType(elemTypeName)
		prop.Items.Type = &mapped
	} else {
		prop.Items.Ref = &elemTypeName
	}
	// add|overwrite model for element type
	if fieldType.Elem().Kind() == reflect.Ptr {
		fieldType = fieldType.Elem()
	}
	if !isPrimitive {
		b.addModel(fieldType.Elem(), elemTypeName)
	}
	return jsonName, prop
}
|
||||||
|
|
||||||
|
// buildPointerTypeProperty creates the property for a pointer-typed field,
// dereferencing one level and treating pointer-to-slice/array as "array".
func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
	prop.setPropertyMetadata(field)
	// Check for type override in tags
	if prop.Type != nil {
		return jsonName, prop
	}
	fieldType := field.Type

	// override type of pointer to list-likes
	if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array {
		var pType = "array"
		prop.Type = &pType
		isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name())
		elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem())
		if isPrimitive {
			// NOTE(review): the primitive schema type is stored in Items.Ref
			// here, while buildArrayTypeProperty puts it in Items.Type —
			// confirm this asymmetry is intended.
			primName := b.jsonSchemaType(elemName)
			prop.Items = &Item{Ref: &primName}
		} else {
			prop.Items = &Item{Ref: &elemName}
		}
		if !isPrimitive {
			// add|overwrite model for element type
			b.addModel(fieldType.Elem().Elem(), elemName)
		}
	} else {
		// non-array, pointer type
		fieldTypeName := b.keyFrom(fieldType.Elem())
		var pType = b.jsonSchemaType(fieldTypeName) // no star, include pkg path
		if b.isPrimitiveType(fieldTypeName) {
			prop.Type = &pType
			prop.Format = b.jsonSchemaFormat(fieldTypeName)
			return jsonName, prop
		}
		prop.Ref = &pType
		elemName := ""
		if fieldType.Elem().Name() == "" {
			// anonymous pointed-to type: name it after the enclosing model/field
			elemName = modelName + "." + jsonName
			prop.Ref = &elemName
		}
		b.addModel(fieldType.Elem(), elemName)
	}
	return jsonName, prop
}
|
||||||
|
|
||||||
|
func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string {
|
||||||
|
if t.Kind() == reflect.Ptr {
|
||||||
|
t = t.Elem()
|
||||||
|
}
|
||||||
|
if t.Name() == "" {
|
||||||
|
return modelName + "." + jsonName
|
||||||
|
}
|
||||||
|
return b.keyFrom(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// keyFrom computes the model key for a type: the Go type string, possibly
// overridden by Config.ModelTypeNameHandler. For unnamed types, "[]" is
// rewritten to "||" because Swagger UI assigns special meaning to "[".
func (b modelBuilder) keyFrom(st reflect.Type) string {
	key := st.String()
	if b.Config != nil && b.Config.ModelTypeNameHandler != nil {
		if name, ok := b.Config.ModelTypeNameHandler(st); ok {
			key = name
		}
	}
	if len(st.Name()) == 0 { // unnamed type
		// Swagger UI has special meaning for [
		key = strings.Replace(key, "[]", "||", -1)
	}
	return key
}
|
||||||
|
|
||||||
|
// see also https://golang.org/ref/spec#Numeric_types
|
||||||
|
func (b modelBuilder) isPrimitiveType(modelName string) bool {
|
||||||
|
if len(modelName) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// jsonNameOfField returns the name of the field as it should appear in JSON format
|
||||||
|
// An empty string indicates that this field is not part of the JSON representation
|
||||||
|
func (b modelBuilder) jsonNameOfField(field reflect.StructField) string {
|
||||||
|
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
|
||||||
|
s := strings.Split(jsonTag, ",")
|
||||||
|
if s[0] == "-" {
|
||||||
|
// empty name signals skip property
|
||||||
|
return ""
|
||||||
|
} else if s[0] != "" {
|
||||||
|
return s[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return field.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
// see also http://json-schema.org/latest/json-schema-core.html#anchor8
|
||||||
|
func (b modelBuilder) jsonSchemaType(modelName string) string {
|
||||||
|
schemaMap := map[string]string{
|
||||||
|
"uint": "integer",
|
||||||
|
"uint8": "integer",
|
||||||
|
"uint16": "integer",
|
||||||
|
"uint32": "integer",
|
||||||
|
"uint64": "integer",
|
||||||
|
|
||||||
|
"int": "integer",
|
||||||
|
"int8": "integer",
|
||||||
|
"int16": "integer",
|
||||||
|
"int32": "integer",
|
||||||
|
"int64": "integer",
|
||||||
|
|
||||||
|
"byte": "integer",
|
||||||
|
"float64": "number",
|
||||||
|
"float32": "number",
|
||||||
|
"bool": "boolean",
|
||||||
|
"time.Time": "string",
|
||||||
|
}
|
||||||
|
mapped, ok := schemaMap[modelName]
|
||||||
|
if !ok {
|
||||||
|
return modelName // use as is (custom or struct)
|
||||||
|
}
|
||||||
|
return mapped
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b modelBuilder) jsonSchemaFormat(modelName string) string {
|
||||||
|
if b.Config != nil && b.Config.SchemaFormatHandler != nil {
|
||||||
|
if mapped := b.Config.SchemaFormatHandler(modelName); mapped != "" {
|
||||||
|
return mapped
|
||||||
|
}
|
||||||
|
}
|
||||||
|
schemaMap := map[string]string{
|
||||||
|
"int": "int32",
|
||||||
|
"int32": "int32",
|
||||||
|
"int64": "int64",
|
||||||
|
"byte": "byte",
|
||||||
|
"uint": "integer",
|
||||||
|
"uint8": "byte",
|
||||||
|
"float64": "double",
|
||||||
|
"float32": "float",
|
||||||
|
"time.Time": "date-time",
|
||||||
|
"*time.Time": "date-time",
|
||||||
|
}
|
||||||
|
mapped, ok := schemaMap[modelName]
|
||||||
|
if !ok {
|
||||||
|
return "" // no format
|
||||||
|
}
|
||||||
|
return mapped
|
||||||
|
}
|
|
@ -0,0 +1,86 @@
|
||||||
|
package swagger
|
||||||
|
|
||||||
|
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NamedModel associates a name with a Model (not using its Id).
type NamedModel struct {
	// Name is the key under which the model is listed.
	Name string
	// Model is the associated Swagger model definition.
	Model Model
}
|
||||||
|
|
||||||
|
// ModelList encapsulates a list of NamedModel (association).
// A slice is used instead of a map so that MarshalJSON emits the models
// in insertion order.
type ModelList struct {
	List []NamedModel
}
|
||||||
|
|
||||||
|
// Put adds or replaces a Model by its name
|
||||||
|
func (l *ModelList) Put(name string, model Model) {
|
||||||
|
for i, each := range l.List {
|
||||||
|
if each.Name == name {
|
||||||
|
// replace
|
||||||
|
l.List[i] = NamedModel{name, model}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// add
|
||||||
|
l.List = append(l.List, NamedModel{name, model})
|
||||||
|
}
|
||||||
|
|
||||||
|
// At returns a Model by its name, ok is false if absent
|
||||||
|
func (l *ModelList) At(name string) (m Model, ok bool) {
|
||||||
|
for _, each := range l.List {
|
||||||
|
if each.Name == name {
|
||||||
|
return each.Model, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return m, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do enumerates all the models, each with its assigned name
|
||||||
|
func (l *ModelList) Do(block func(name string, value Model)) {
|
||||||
|
for _, each := range l.List {
|
||||||
|
block(each.Name, each.Model)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON writes the ModelList as if it was a map[string]Model
|
||||||
|
func (l ModelList) MarshalJSON() ([]byte, error) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
encoder := json.NewEncoder(&buf)
|
||||||
|
buf.WriteString("{\n")
|
||||||
|
for i, each := range l.List {
|
||||||
|
buf.WriteString("\"")
|
||||||
|
buf.WriteString(each.Name)
|
||||||
|
buf.WriteString("\": ")
|
||||||
|
encoder.Encode(each.Model)
|
||||||
|
if i < len(l.List)-1 {
|
||||||
|
buf.WriteString(",\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
buf.WriteString("}")
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON reads back a ModelList. This is an expensive operation.
|
||||||
|
func (l *ModelList) UnmarshalJSON(data []byte) error {
|
||||||
|
raw := map[string]interface{}{}
|
||||||
|
json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
|
||||||
|
for k, v := range raw {
|
||||||
|
// produces JSON bytes for each value
|
||||||
|
data, err := json.Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var m Model
|
||||||
|
json.NewDecoder(bytes.NewReader(data)).Decode(&m)
|
||||||
|
l.Put(k, m)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
81
vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go
generated
vendored
Normal file
81
vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go
generated
vendored
Normal file
|
@ -0,0 +1,81 @@
|
||||||
|
package swagger
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (prop *ModelProperty) setDescription(field reflect.StructField) {
|
||||||
|
if tag := field.Tag.Get("description"); tag != "" {
|
||||||
|
prop.Description = tag
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (prop *ModelProperty) setDefaultValue(field reflect.StructField) {
|
||||||
|
if tag := field.Tag.Get("default"); tag != "" {
|
||||||
|
prop.DefaultValue = Special(tag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (prop *ModelProperty) setEnumValues(field reflect.StructField) {
|
||||||
|
// We use | to separate the enum values. This value is chosen
|
||||||
|
// since its unlikely to be useful in actual enumeration values.
|
||||||
|
if tag := field.Tag.Get("enum"); tag != "" {
|
||||||
|
prop.Enum = strings.Split(tag, "|")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (prop *ModelProperty) setMaximum(field reflect.StructField) {
|
||||||
|
if tag := field.Tag.Get("maximum"); tag != "" {
|
||||||
|
prop.Maximum = tag
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (prop *ModelProperty) setType(field reflect.StructField) {
|
||||||
|
if tag := field.Tag.Get("type"); tag != "" {
|
||||||
|
// Check if the first two characters of the type tag are
|
||||||
|
// intended to emulate slice/array behaviour.
|
||||||
|
//
|
||||||
|
// If type is intended to be a slice/array then add the
|
||||||
|
// overriden type to the array item instead of the main property
|
||||||
|
if len(tag) > 2 && tag[0:2] == "[]" {
|
||||||
|
pType := "array"
|
||||||
|
prop.Type = &pType
|
||||||
|
prop.Items = new(Item)
|
||||||
|
|
||||||
|
iType := tag[2:]
|
||||||
|
prop.Items.Type = &iType
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
prop.Type = &tag
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (prop *ModelProperty) setMinimum(field reflect.StructField) {
|
||||||
|
if tag := field.Tag.Get("minimum"); tag != "" {
|
||||||
|
prop.Minimum = tag
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (prop *ModelProperty) setUniqueItems(field reflect.StructField) {
|
||||||
|
tag := field.Tag.Get("unique")
|
||||||
|
switch tag {
|
||||||
|
case "true":
|
||||||
|
v := true
|
||||||
|
prop.UniqueItems = &v
|
||||||
|
case "false":
|
||||||
|
v := false
|
||||||
|
prop.UniqueItems = &v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) {
|
||||||
|
prop.setDescription(field)
|
||||||
|
prop.setEnumValues(field)
|
||||||
|
prop.setMinimum(field)
|
||||||
|
prop.setMaximum(field)
|
||||||
|
prop.setUniqueItems(field)
|
||||||
|
prop.setDefaultValue(field)
|
||||||
|
prop.setType(field)
|
||||||
|
}
|
87
vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go
generated
vendored
Normal file
87
vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go
generated
vendored
Normal file
|
@ -0,0 +1,87 @@
|
||||||
|
package swagger
|
||||||
|
|
||||||
|
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NamedModelProperty associates a name to a ModelProperty.
type NamedModelProperty struct {
	// Name is the key under which the property is listed.
	Name string
	// Property is the associated Swagger model property.
	Property ModelProperty
}
|
||||||
|
|
||||||
|
// ModelPropertyList encapsulates a list of NamedModelProperty (association).
// A slice is used instead of a map so that MarshalJSON emits the properties
// in insertion order.
type ModelPropertyList struct {
	List []NamedModelProperty
}
|
||||||
|
|
||||||
|
// At returns the ModelPropety by its name unless absent, then ok is false
|
||||||
|
func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) {
|
||||||
|
for _, each := range l.List {
|
||||||
|
if each.Name == name {
|
||||||
|
return each.Property, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return p, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put adds or replaces a ModelProperty with this name
|
||||||
|
func (l *ModelPropertyList) Put(name string, prop ModelProperty) {
|
||||||
|
// maybe replace existing
|
||||||
|
for i, each := range l.List {
|
||||||
|
if each.Name == name {
|
||||||
|
// replace
|
||||||
|
l.List[i] = NamedModelProperty{Name: name, Property: prop}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// add
|
||||||
|
l.List = append(l.List, NamedModelProperty{Name: name, Property: prop})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do enumerates all the properties, each with its assigned name
|
||||||
|
func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) {
|
||||||
|
for _, each := range l.List {
|
||||||
|
block(each.Name, each.Property)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
|
||||||
|
func (l ModelPropertyList) MarshalJSON() ([]byte, error) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
encoder := json.NewEncoder(&buf)
|
||||||
|
buf.WriteString("{\n")
|
||||||
|
for i, each := range l.List {
|
||||||
|
buf.WriteString("\"")
|
||||||
|
buf.WriteString(each.Name)
|
||||||
|
buf.WriteString("\": ")
|
||||||
|
encoder.Encode(each.Property)
|
||||||
|
if i < len(l.List)-1 {
|
||||||
|
buf.WriteString(",\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
buf.WriteString("}")
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
|
||||||
|
func (l *ModelPropertyList) UnmarshalJSON(data []byte) error {
|
||||||
|
raw := map[string]interface{}{}
|
||||||
|
json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
|
||||||
|
for k, v := range raw {
|
||||||
|
// produces JSON bytes for each value
|
||||||
|
data, err := json.Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var m ModelProperty
|
||||||
|
json.NewDecoder(bytes.NewReader(data)).Decode(&m)
|
||||||
|
l.Put(k, m)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
36
vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go
generated
vendored
Normal file
36
vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go
generated
vendored
Normal file
|
@ -0,0 +1,36 @@
|
||||||
|
package swagger
|
||||||
|
|
||||||
|
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license
|
||||||
|
// that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
import "github.com/emicklei/go-restful"
|
||||||
|
|
||||||
|
// orderedRouteMap maps a path key to its routes while also remembering the
// order in which keys were first added, so Do can iterate deterministically.
type orderedRouteMap struct {
	// elements holds the routes registered per key.
	elements map[string][]restful.Route
	// keys records first-insertion order of the keys in elements.
	keys []string
}
|
||||||
|
|
||||||
|
func newOrderedRouteMap() *orderedRouteMap {
|
||||||
|
return &orderedRouteMap{
|
||||||
|
elements: map[string][]restful.Route{},
|
||||||
|
keys: []string{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *orderedRouteMap) Add(key string, route restful.Route) {
|
||||||
|
routes, ok := o.elements[key]
|
||||||
|
if ok {
|
||||||
|
routes = append(routes, route)
|
||||||
|
o.elements[key] = routes
|
||||||
|
return
|
||||||
|
}
|
||||||
|
o.elements[key] = []restful.Route{route}
|
||||||
|
o.keys = append(o.keys, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) {
|
||||||
|
for _, k := range o.keys {
|
||||||
|
block(k, o.elements[k])
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,185 @@
|
||||||
|
// Package swagger implements the structures of the Swagger
|
||||||
|
// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md
|
||||||
|
package swagger
|
||||||
|
|
||||||
|
// swaggerVersion is the version of the Swagger specification produced here.
const swaggerVersion = "1.2"
|
||||||
|
|
||||||
|
// DataTypeFields holds the Swagger 1.2 data-type fields (spec section 4.3.3)
// shared by operations, parameters and model properties. Exactly one of
// Type or Ref is expected to be set.
type DataTypeFields struct {
	Type         *string  `json:"type,omitempty"` // if Ref not used
	Ref          *string  `json:"$ref,omitempty"` // if Type not used
	Format       string   `json:"format,omitempty"`
	DefaultValue Special  `json:"defaultValue,omitempty"`
	Enum         []string `json:"enum,omitempty"`
	Minimum      string   `json:"minimum,omitempty"`
	Maximum      string   `json:"maximum,omitempty"`
	Items        *Item    `json:"items,omitempty"`
	UniqueItems  *bool    `json:"uniqueItems,omitempty"`
}

// Special is a string type used to carry literal default values.
type Special string

// Item describes the type of the elements in a container (spec section 4.3.4).
type Item struct {
	Type   *string `json:"type,omitempty"`
	Ref    *string `json:"$ref,omitempty"`
	Format string  `json:"format,omitempty"`
}
|
||||||
|
|
||||||
|
// ResourceListing is the root document of a Swagger 1.2 API
// (spec section 5.1).
type ResourceListing struct {
	SwaggerVersion string          `json:"swaggerVersion"` // e.g 1.2
	Apis           []Resource      `json:"apis"`
	ApiVersion     string          `json:"apiVersion"`
	Info           Info            `json:"info"`
	Authorizations []Authorization `json:"authorizations,omitempty"`
}

// Resource points at one API declaration (spec section 5.1.2).
type Resource struct {
	Path        string `json:"path"` // relative or absolute, must start with /
	Description string `json:"description"`
}

// Info carries metadata about the API (spec section 5.1.3).
type Info struct {
	Title             string `json:"title"`
	Description       string `json:"description"`
	TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"`
	Contact           string `json:"contact,omitempty"`
	License           string `json:"license,omitempty"`
	LicenseUrl        string `json:"licenseUrl,omitempty"`
}
|
||||||
|
|
||||||
|
// Authorization describes an authorization scheme (spec section 5.1.5).
type Authorization struct {
	Type    string  `json:"type"`
	PassAs  string  `json:"passAs"`
	Keyname string  `json:"keyname"`
	Scopes  []Scope `json:"scopes"`
	// NOTE(review): the wire name "grandTypes" looks like a typo for
	// "grantTypes" but is kept as-is for compatibility with existing
	// consumers — confirm before changing.
	GrantTypes []GrantType `json:"grandTypes"`
}

// Scope describes an OAuth2 scope (spec sections 5.1.6, 5.2.11).
type Scope struct {
	// Required. The name of the scope.
	Scope string `json:"scope"`
	// Recommended. A short description of the scope.
	Description string `json:"description"`
}

// GrantType groups the supported OAuth2 grant flows (spec section 5.1.7).
type GrantType struct {
	Implicit          Implicit          `json:"implicit"`
	AuthorizationCode AuthorizationCode `json:"authorization_code"`
}

// Implicit describes the implicit grant flow (spec section 5.1.8).
type Implicit struct {
	// Required. The login endpoint definition.
	// NOTE(review): this field is unexported, so encoding/json will neither
	// marshal nor unmarshal it despite the tag — confirm whether it should
	// be exported.
	loginEndpoint LoginEndpoint `json:"loginEndpoint"`
	// An optional alternative name to standard "access_token" OAuth2 parameter.
	TokenName string `json:"tokenName"`
}

// AuthorizationCode describes the authorization-code grant flow
// (spec section 5.1.9).
type AuthorizationCode struct {
	TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"`
	TokenEndpoint        TokenEndpoint        `json:"tokenEndpoint"`
}

// LoginEndpoint locates the authorization endpoint for the implicit flow
// (spec section 5.1.10).
type LoginEndpoint struct {
	// Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format.
	Url string `json:"url"`
}

// TokenRequestEndpoint locates the authorization endpoint for the
// authentication-code flow (spec section 5.1.11).
type TokenRequestEndpoint struct {
	// Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
	Url string `json:"url"`
	// An optional alternative name to standard "client_id" OAuth2 parameter.
	ClientIdName string `json:"clientIdName"`
	// An optional alternative name to the standard "client_secret" OAuth2 parameter.
	ClientSecretName string `json:"clientSecretName"`
}

// TokenEndpoint locates the token endpoint for the authentication-code flow
// (spec section 5.1.12).
type TokenEndpoint struct {
	// Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
	Url string `json:"url"`
	// An optional alternative name to standard "access_token" OAuth2 parameter.
	TokenName string `json:"tokenName"`
}
|
||||||
|
|
||||||
|
// ApiDeclaration describes one API resource and its operations
// (spec section 5.2).
type ApiDeclaration struct {
	SwaggerVersion string          `json:"swaggerVersion"`
	ApiVersion     string          `json:"apiVersion"`
	BasePath       string          `json:"basePath"`
	ResourcePath   string          `json:"resourcePath"` // must start with /
	Info           Info            `json:"info"`
	Apis           []Api           `json:"apis,omitempty"`
	Models         ModelList       `json:"models,omitempty"`
	Produces       []string        `json:"produces,omitempty"`
	Consumes       []string        `json:"consumes,omitempty"`
	Authorizations []Authorization `json:"authorizations,omitempty"`
}

// Api groups the operations available on a single path (spec section 5.2.2).
type Api struct {
	Path        string      `json:"path"` // relative or absolute, must start with /
	Description string      `json:"description"`
	Operations  []Operation `json:"operations,omitempty"`
}

// Operation describes a single HTTP operation on a path (spec section 5.2.3).
// The embedded DataTypeFields carry the return type of the operation.
type Operation struct {
	DataTypeFields
	Method           string            `json:"method"`
	Summary          string            `json:"summary,omitempty"`
	Notes            string            `json:"notes,omitempty"`
	Nickname         string            `json:"nickname"`
	Authorizations   []Authorization   `json:"authorizations,omitempty"`
	Parameters       []Parameter       `json:"parameters"`
	ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional
	Produces         []string          `json:"produces,omitempty"`
	Consumes         []string          `json:"consumes,omitempty"`
	Deprecated       string            `json:"deprecated,omitempty"`
}

// Parameter describes an input to an operation (spec section 5.2.4).
type Parameter struct {
	DataTypeFields
	ParamType     string `json:"paramType"` // path,query,body,header,form
	Name          string `json:"name"`
	Description   string `json:"description"`
	Required      bool   `json:"required"`
	AllowMultiple bool   `json:"allowMultiple"`
}

// ResponseMessage documents a possible response status of an operation
// (spec section 5.2.5).
type ResponseMessage struct {
	Code          int    `json:"code"`
	Message       string `json:"message"`
	ResponseModel string `json:"responseModel,omitempty"`
}

// Model describes a data structure used by the API
// (spec sections 5.2.6, 5.2.7).
type Model struct {
	Id            string            `json:"id"`
	Description   string            `json:"description,omitempty"`
	Required      []string          `json:"required,omitempty"`
	Properties    ModelPropertyList `json:"properties"`
	SubTypes      []string          `json:"subTypes,omitempty"`
	Discriminator string            `json:"discriminator,omitempty"`
}

// ModelProperty describes one property of a Model (spec section 5.2.8).
type ModelProperty struct {
	DataTypeFields
	Description string `json:"description,omitempty"`
}

// Authorizations maps scheme names to their definitions (spec section 5.2.10).
type Authorizations map[string]Authorization
|
21
vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go
generated
vendored
Normal file
21
vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
package swagger
|
||||||
|
|
||||||
|
// SwaggerBuilder exposes the listing/declaration production of a
// SwaggerService without registering any HTTP routes.
type SwaggerBuilder struct {
	SwaggerService
}
|
||||||
|
|
||||||
|
func NewSwaggerBuilder(config Config) *SwaggerBuilder {
|
||||||
|
return &SwaggerBuilder{*newSwaggerService(config)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sb SwaggerBuilder) ProduceListing() ResourceListing {
|
||||||
|
return sb.SwaggerService.produceListing()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration {
|
||||||
|
return sb.SwaggerService.produceAllDeclarations()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) {
|
||||||
|
return sb.SwaggerService.produceDeclarations(route)
|
||||||
|
}
|
443
vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go
generated
vendored
Normal file
443
vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go
generated
vendored
Normal file
|
@ -0,0 +1,443 @@
|
||||||
|
package swagger
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/emicklei/go-restful"
|
||||||
|
// "github.com/emicklei/hopwatch"
|
||||||
|
"net/http"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/emicklei/go-restful/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SwaggerService produces the Swagger 1.2 resource listing and API
// declarations for a set of restful WebServices.
type SwaggerService struct {
	// config holds the settings the service was built with.
	config Config
	// apiDeclarationMap holds the declarations keyed by resource root path.
	apiDeclarationMap *ApiDeclarationList
}
|
||||||
|
|
||||||
|
// newSwaggerService builds a SwaggerService and eagerly composes an
// ApiDeclaration for every configured WebService (except the swagger API
// service itself). When set, Config.PostBuildHandler is called with the
// finished declaration map so callers can post-process it.
func newSwaggerService(config Config) *SwaggerService {
	sws := &SwaggerService{
		config:            config,
		apiDeclarationMap: new(ApiDeclarationList)}

	// Build all ApiDeclarations
	for _, each := range config.WebServices {
		rootPath := each.RootPath()
		// skip the api service itself
		if rootPath != config.ApiPath {
			if rootPath == "" || rootPath == "/" {
				// use routes: group declarations by each route's static prefix
				for _, route := range each.Routes() {
					entry := staticPathFromRoute(route)
					_, exists := sws.apiDeclarationMap.At(entry)
					if !exists {
						sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
					}
				}
			} else { // use root path
				sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
			}
		}
	}

	// if specified then call the PostBuilderHandler
	if config.PostBuildHandler != nil {
		config.PostBuildHandler(sws.apiDeclarationMap)
	}
	return sws
}
|
||||||
|
|
||||||
|
// LogInfo is the function that is called when this package needs to log.
// It defaults to the restful package-wide logger (log.Printf) and may be
// replaced by the application.
var LogInfo = func(format string, v ...interface{}) {
	// use the restful package-wide logger
	log.Printf(format, v...)
}
|
||||||
|
|
||||||
|
// InstallSwaggerService add the WebService that provides the API documentation of all services
|
||||||
|
// conform the Swagger documentation specifcation. (https://github.com/wordnik/swagger-core/wiki).
|
||||||
|
func InstallSwaggerService(aSwaggerConfig Config) {
|
||||||
|
RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterSwaggerService adds, to the given container, the WebService that
// provides the API documentation of all services conforming to the Swagger
// documentation specification (https://github.com/wordnik/swagger-core/wiki).
// It also optionally serves the Swagger UI from a static folder or a custom
// handler, depending on the Config.
func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
	sws := newSwaggerService(config)
	ws := new(restful.WebService)
	ws.Path(config.ApiPath)
	ws.Produces(restful.MIME_JSON)
	// NOTE(review): the CORS filter is installed when DisableCORS is true,
	// which reads inverted — confirm the intended semantics of
	// Config.DisableCORS before changing this vendored code.
	if config.DisableCORS {
		ws.Filter(enableCORS)
	}
	ws.Route(ws.GET("/").To(sws.getListing))
	// Register declaration routes for up to 7 path segments; restful has no
	// wildcard route here, so each depth gets its own template.
	ws.Route(ws.GET("/{a}").To(sws.getDeclarations))
	ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations))
	ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations))
	ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations))
	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations))
	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations))
	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations))
	LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath)
	wsContainer.Add(ws)

	// Check paths for UI serving
	if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" {
		swaggerPathSlash := config.SwaggerPath
		// path must end with slash /
		if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
			LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
			swaggerPathSlash += "/"
		}

		LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath)
		wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath))))

		//if we define a custom static handler use it
	} else if config.StaticHandler != nil && config.SwaggerPath != "" {
		swaggerPathSlash := config.SwaggerPath
		// path must end with slash /
		// NOTE(review): this log message says "SwaggerFilePath" although it is
		// SwaggerPath that is being corrected here — likely a copy-paste slip.
		if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
			LogInfo("[restful/swagger] use corrected SwaggerFilePath ; must end with slash (/)")
			swaggerPathSlash += "/"

		}
		LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler)
		wsContainer.Handle(swaggerPathSlash, config.StaticHandler)

	} else {
		LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served")
	}
}
|
||||||
|
|
||||||
|
func staticPathFromRoute(r restful.Route) string {
|
||||||
|
static := r.Path
|
||||||
|
bracket := strings.Index(static, "{")
|
||||||
|
if bracket <= 1 { // result cannot be empty
|
||||||
|
return static
|
||||||
|
}
|
||||||
|
if bracket != -1 {
|
||||||
|
static = r.Path[:bracket]
|
||||||
|
}
|
||||||
|
if strings.HasSuffix(static, "/") {
|
||||||
|
return static[:len(static)-1]
|
||||||
|
} else {
|
||||||
|
return static
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
|
||||||
|
if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" {
|
||||||
|
// prevent duplicate header
|
||||||
|
if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 {
|
||||||
|
resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
chain.ProcessFilter(req, resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
|
||||||
|
listing := sws.produceListing()
|
||||||
|
resp.WriteAsJson(listing)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sws SwaggerService) produceListing() ResourceListing {
|
||||||
|
listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info}
|
||||||
|
sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
|
||||||
|
ref := Resource{Path: k}
|
||||||
|
if len(v.Apis) > 0 { // use description of first (could still be empty)
|
||||||
|
ref.Description = v.Apis[0].Description
|
||||||
|
}
|
||||||
|
listing.Apis = append(listing.Apis, ref)
|
||||||
|
})
|
||||||
|
return listing
|
||||||
|
}
|
||||||
|
|
||||||
|
// getDeclarations is the HTTP handler that serves one ApiDeclaration as
// JSON, identified by the request path. When no WebServicesUrl is
// configured, the declaration's BasePath is derived from the incoming
// request headers instead.
func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) {
	decl, ok := sws.produceDeclarations(composeRootPath(req))
	if !ok {
		resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found")
		return
	}
	// unless WebServicesUrl is given
	if len(sws.config.WebServicesUrl) == 0 {
		// update base path from the actual request
		// TODO how to detect https? assume http for now
		var host string
		// Host resolution order: X-Forwarded-Host, then Host header,
		// then the request's Host field.
		hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific?
		if !ok || len(hostvalues) == 0 {
			forwarded, ok := req.Request.Header["Host"] // without reverse-proxy
			if !ok || len(forwarded) == 0 {
				// fallback to Host field
				host = req.Request.Host
			} else {
				host = forwarded[0]
			}
		} else {
			host = hostvalues[0]
		}
		// inspect Referer for the scheme (http vs https)
		scheme := "http"
		if referer := req.Request.Header["Referer"]; len(referer) > 0 {
			if strings.HasPrefix(referer[0], "https") {
				scheme = "https"
			}
		}
		decl.BasePath = fmt.Sprintf("%s://%s", scheme, host)
	}
	resp.WriteAsJson(decl)
}
|
||||||
|
|
||||||
|
func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration {
|
||||||
|
decls := map[string]ApiDeclaration{}
|
||||||
|
sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
|
||||||
|
decls[k] = v
|
||||||
|
})
|
||||||
|
return decls
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) {
|
||||||
|
decl, ok := sws.apiDeclarationMap.At(route)
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
decl.BasePath = sws.config.WebServicesUrl
|
||||||
|
return &decl, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// composeDeclaration uses all routes and parameters of ws that fall under
// pathPrefix to create an ApiDeclaration: routes are grouped by path (in
// first-seen order), each route becomes an Operation, and referenced models
// are collected into the declaration.
func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration {
	decl := ApiDeclaration{
		SwaggerVersion: swaggerVersion,
		BasePath:       sws.config.WebServicesUrl,
		ResourcePath:   pathPrefix,
		Models:         ModelList{},
		ApiVersion:     ws.Version()}

	// collect any path parameters
	rootParams := []Parameter{}
	for _, param := range ws.PathParameters() {
		rootParams = append(rootParams, asSwaggerParameter(param.Data()))
	}
	// aggregate by path
	pathToRoutes := newOrderedRouteMap()
	for _, other := range ws.Routes() {
		if strings.HasPrefix(other.Path, pathPrefix) {
			// exclude a sibling resource whose name merely starts with the
			// prefix (e.g. "/users2" under prefix "/users")
			if len(pathPrefix) > 1 && len(other.Path) > len(pathPrefix) && other.Path[len(pathPrefix)] != '/' {
				continue
			}
			pathToRoutes.Add(other.Path, other)
		}
	}
	pathToRoutes.Do(func(path string, routes []restful.Route) {
		api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()}
		voidString := "void"
		for _, route := range routes {
			operation := Operation{
				Method:  route.Method,
				Summary: route.Doc,
				Notes:   route.Notes,
				// Type gets overwritten if there is a write sample
				DataTypeFields:   DataTypeFields{Type: &voidString},
				Parameters:       []Parameter{},
				Nickname:         route.Operation,
				ResponseMessages: composeResponseMessages(route, &decl, &sws.config)}

			operation.Consumes = route.Consumes
			operation.Produces = route.Produces

			// share root params if any
			for _, swparam := range rootParams {
				operation.Parameters = append(operation.Parameters, swparam)
			}
			// route specific params
			for _, param := range route.ParameterDocs {
				operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data()))
			}

			sws.addModelsFromRouteTo(&operation, route, &decl)
			api.Operations = append(api.Operations, operation)
		}
		decl.Apis = append(decl.Apis, api)
	})
	return decl
}
|
||||||
|
|
||||||
|
// withoutWildcard rewrites a path ending in a full-wildcard parameter
// (e.g. "/files/{path:*}") to its plain parameter form ("/files/{path}").
// Paths without the ":*}" suffix are returned unchanged.
func withoutWildcard(path string) string {
	const wildcardSuffix = ":*}"
	if !strings.HasSuffix(path, wildcardSuffix) {
		return path
	}
	return strings.TrimSuffix(path, wildcardSuffix) + "}"
}
|
||||||
|
|
||||||
|
// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them.
// Messages are emitted in ascending status-code order so output is
// deterministic despite map iteration. Non-collection error models are
// registered on decl and referenced by name; collection models are skipped.
func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) {
	if route.ResponseErrors == nil {
		return messages
	}
	// sort by code
	codes := sort.IntSlice{}
	for code := range route.ResponseErrors {
		codes = append(codes, code)
	}
	codes.Sort()
	for _, code := range codes {
		each := route.ResponseErrors[code]
		message := ResponseMessage{
			Code:    code,
			Message: each.Message,
		}
		if each.Model != nil {
			st := reflect.TypeOf(each.Model)
			// unwrap slices/arrays (possibly behind a pointer) to the element type
			isCollection, st := detectCollectionType(st)
			// collection cannot be in responsemodel
			if !isCollection {
				modelName := modelBuilder{}.keyFrom(st)
				modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
				message.ResponseModel = modelName
			}
		}
		messages = append(messages, message)
	}
	return
}
|
||||||
|
|
||||||
|
// addModelsFromRoute takes any read or write sample from the Route and creates a Swagger model from it.
|
||||||
|
func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) {
|
||||||
|
if route.ReadSample != nil {
|
||||||
|
sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models)
|
||||||
|
}
|
||||||
|
if route.WriteSample != nil {
|
||||||
|
sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
|
||||||
|
isCollection := false
|
||||||
|
if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
|
||||||
|
st = st.Elem()
|
||||||
|
isCollection = true
|
||||||
|
} else {
|
||||||
|
if st.Kind() == reflect.Ptr {
|
||||||
|
if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array {
|
||||||
|
st = st.Elem().Elem()
|
||||||
|
isCollection = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return isCollection, st
|
||||||
|
}
|
||||||
|
|
||||||
|
// addModelFromSample creates and adds (or overwrites) a Model from a sample resource
|
||||||
|
func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
|
||||||
|
mb := modelBuilder{Models: models, Config: &sws.config}
|
||||||
|
if isResponse {
|
||||||
|
sampleType, items := asDataType(sample, &sws.config)
|
||||||
|
operation.Type = sampleType
|
||||||
|
operation.Items = items
|
||||||
|
}
|
||||||
|
mb.addModelFrom(sample)
|
||||||
|
}
|
||||||
|
|
||||||
|
// asSwaggerParameter converts restful parameter metadata into its Swagger
// Parameter representation, mapping the data type, format, default value,
// and the parameter kind (path/query/body/header/form).
func asSwaggerParameter(param restful.ParameterData) Parameter {
	return Parameter{
		DataTypeFields: DataTypeFields{
			Type:         &param.DataType,
			Format:       asFormat(param.DataType, param.DataFormat),
			DefaultValue: Special(param.DefaultValue),
		},
		Name:        param.Name,
		Description: param.Description,
		ParamType:   asParamType(param.Kind),

		Required: param.Required}
}
|
||||||
|
|
||||||
|
// Between 1..7 path parameters is supported
|
||||||
|
func composeRootPath(req *restful.Request) string {
|
||||||
|
path := "/" + req.PathParameter("a")
|
||||||
|
b := req.PathParameter("b")
|
||||||
|
if b == "" {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
path = path + "/" + b
|
||||||
|
c := req.PathParameter("c")
|
||||||
|
if c == "" {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
path = path + "/" + c
|
||||||
|
d := req.PathParameter("d")
|
||||||
|
if d == "" {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
path = path + "/" + d
|
||||||
|
e := req.PathParameter("e")
|
||||||
|
if e == "" {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
path = path + "/" + e
|
||||||
|
f := req.PathParameter("f")
|
||||||
|
if f == "" {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
path = path + "/" + f
|
||||||
|
g := req.PathParameter("g")
|
||||||
|
if g == "" {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
return path + "/" + g
|
||||||
|
}
|
||||||
|
|
||||||
|
// asFormat maps a parameter's data type and declared format to the
// Swagger "format" field. Only an explicitly declared format is honored;
// nothing is derived from dataType yet.
func asFormat(dataType string, dataFormat string) string {
	if dataFormat == "" {
		return "" // TODO: derive a format from dataType
	}
	return dataFormat
}
|
||||||
|
|
||||||
|
func asParamType(kind int) string {
|
||||||
|
switch {
|
||||||
|
case kind == restful.PathParameterKind:
|
||||||
|
return "path"
|
||||||
|
case kind == restful.QueryParameterKind:
|
||||||
|
return "query"
|
||||||
|
case kind == restful.BodyParameterKind:
|
||||||
|
return "body"
|
||||||
|
case kind == restful.HeaderParameterKind:
|
||||||
|
return "header"
|
||||||
|
case kind == restful.FormParameterKind:
|
||||||
|
return "form"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// asDataType returns the Swagger data-type name describing the sample
// value `any` and, when the sample is a collection, an Item describing
// its elements (nil otherwise).
func asDataType(any interface{}, config *Config) (*string, *Item) {
	// If it's not a collection, return the suggested model name
	st := reflect.TypeOf(any)
	isCollection, st := detectCollectionType(st)
	modelName := modelBuilder{}.keyFrom(st)
	// if it's not a collection we are done
	if !isCollection {
		return &modelName, nil
	}

	// XXX: This is not very elegant
	// We create an Item object referring to the given model
	models := ModelList{}
	mb := modelBuilder{Models: &models, Config: config}
	mb.addModelFrom(any)

	elemTypeName := mb.getElementTypeName(modelName, "", st)
	item := new(Item)
	if mb.isPrimitiveType(elemTypeName) {
		// primitive element: report the mapped JSON-schema type
		mapped := mb.jsonSchemaType(elemTypeName)
		item.Type = &mapped
	} else {
		// complex element: reference its model by name
		item.Ref = &elemTypeName
	}
	// collections are always reported as "array"
	tmp := "array"
	return &tmp, item
}
|
|
@ -0,0 +1,74 @@
|
||||||
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
In the interest of fostering an open and welcoming environment, we as
|
||||||
|
contributors and maintainers pledge to making participation in our project and
|
||||||
|
our community a harassment-free experience for everyone, regardless of age, body
|
||||||
|
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||||
|
nationality, personal appearance, race, religion, or sexual identity and
|
||||||
|
orientation.
|
||||||
|
|
||||||
|
## Our Standards
|
||||||
|
|
||||||
|
Examples of behavior that contributes to creating a positive environment
|
||||||
|
include:
|
||||||
|
|
||||||
|
* Using welcoming and inclusive language
|
||||||
|
* Being respectful of differing viewpoints and experiences
|
||||||
|
* Gracefully accepting constructive criticism
|
||||||
|
* Focusing on what is best for the community
|
||||||
|
* Showing empathy towards other community members
|
||||||
|
|
||||||
|
Examples of unacceptable behavior by participants include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||||
|
advances
|
||||||
|
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as a physical or electronic
|
||||||
|
address, without explicit permission
|
||||||
|
* Other conduct which could reasonably be considered inappropriate in a
|
||||||
|
professional setting
|
||||||
|
|
||||||
|
## Our Responsibilities
|
||||||
|
|
||||||
|
Project maintainers are responsible for clarifying the standards of acceptable
|
||||||
|
behavior and are expected to take appropriate and fair corrective action in
|
||||||
|
response to any instances of unacceptable behavior.
|
||||||
|
|
||||||
|
Project maintainers have the right and responsibility to remove, edit, or
|
||||||
|
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||||
|
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||||
|
permanently any contributor for other behaviors that they deem inappropriate,
|
||||||
|
threatening, offensive, or harmful.
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
This Code of Conduct applies both within project spaces and in public spaces
|
||||||
|
when an individual is representing the project or its community. Examples of
|
||||||
|
representing a project or community include using an official project e-mail
|
||||||
|
address, posting via an official social media account, or acting as an appointed
|
||||||
|
representative at an online or offline event. Representation of a project may be
|
||||||
|
further defined and clarified by project maintainers.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
|
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
||||||
|
complaints will be reviewed and investigated and will result in a response that
|
||||||
|
is deemed necessary and appropriate to the circumstances. The project team is
|
||||||
|
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||||
|
Further details of specific enforcement policies may be posted separately.
|
||||||
|
|
||||||
|
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||||
|
faith may face temporary or permanent repercussions as determined by other
|
||||||
|
members of the project's leadership.
|
||||||
|
|
||||||
|
## Attribution
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||||
|
available at [http://contributor-covenant.org/version/1/4][version]
|
||||||
|
|
||||||
|
[homepage]: http://contributor-covenant.org
|
||||||
|
[version]: http://contributor-covenant.org/version/1/4/
|
|
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@ -0,0 +1,6 @@
|
||||||
|
# OpenAPI initiative analysis [![Build Status](https://travis-ci.org/go-openapi/analysis.svg?branch=master)](https://travis-ci.org/go-openapi/analysis) [![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
||||||
|
|
||||||
|
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis)
|
||||||
|
|
||||||
|
|
||||||
|
A foundational library to analyze an OAI specification document for easier reasoning about the content.
|
|
@ -0,0 +1,785 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package analysis
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
slashpath "path"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/go-openapi/jsonpointer"
|
||||||
|
"github.com/go-openapi/spec"
|
||||||
|
"github.com/go-openapi/swag"
|
||||||
|
)
|
||||||
|
|
||||||
|
// referenceAnalysis collects the $ref usages found in a spec document,
// keyed by "#"-prefixed JSON pointer. Besides the aggregate allRefs index
// it keeps per-kind indices so callers can query only one category.
type referenceAnalysis struct {
	schemas        map[string]spec.Ref // refs recorded via addSchemaRef
	responses      map[string]spec.Ref // refs recorded via addResponseRef
	parameters     map[string]spec.Ref // refs recorded via addParamRef
	items          map[string]spec.Ref // all items refs (header and parameter)
	headerItems    map[string]spec.Ref // items refs that occurred on headers
	parameterItems map[string]spec.Ref // items refs that occurred on parameters
	allRefs        map[string]spec.Ref // every ref recorded, regardless of kind
	pathItems      map[string]spec.Ref // refs recorded via addPathItemRef
}
|
||||||
|
|
||||||
|
// addRef records ref in the aggregate index under the "#"-prefixed
// JSON-pointer key. The per-kind addXxxRef helpers all funnel through here.
func (r *referenceAnalysis) addRef(key string, ref spec.Ref) {
	r.allRefs["#"+key] = ref
}
|
||||||
|
|
||||||
|
func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) {
|
||||||
|
r.items["#"+key] = items.Ref
|
||||||
|
r.addRef(key, items.Ref)
|
||||||
|
if location == "header" {
|
||||||
|
r.headerItems["#"+key] = items.Ref
|
||||||
|
} else {
|
||||||
|
r.parameterItems["#"+key] = items.Ref
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// addSchemaRef indexes the $ref carried by a schema, both in the
// schema-specific and in the aggregate index.
func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) {
	r.schemas["#"+key] = ref.Schema.Ref
	r.addRef(key, ref.Schema.Ref)
}
|
||||||
|
|
||||||
|
// addResponseRef indexes the $ref carried by a response object, both in
// the response-specific and in the aggregate index.
func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) {
	r.responses["#"+key] = resp.Ref
	r.addRef(key, resp.Ref)
}
|
||||||
|
|
||||||
|
// addParamRef indexes the $ref carried by a parameter object, both in
// the parameter-specific and in the aggregate index.
func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) {
	r.parameters["#"+key] = param.Ref
	r.addRef(key, param.Ref)
}
|
||||||
|
|
||||||
|
// addPathItemRef indexes the $ref carried by a path item, both in the
// path-item-specific and in the aggregate index.
func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) {
	r.pathItems["#"+key] = pathItem.Ref
	r.addRef(key, pathItem.Ref)
}
|
||||||
|
|
||||||
|
// patternAnalysis collects the "pattern" constraints found in a spec
// document, keyed by "#"-prefixed JSON pointer, with per-kind indices
// alongside the aggregate allPatterns one.
type patternAnalysis struct {
	parameters  map[string]string // patterns recorded via addParameterPattern
	headers     map[string]string // patterns recorded via addHeaderPattern
	items       map[string]string // patterns recorded via addItemsPattern
	schemas     map[string]string // patterns recorded via addSchemaPattern
	allPatterns map[string]string // every pattern recorded, regardless of kind
}
|
||||||
|
|
||||||
|
// addPattern records pattern in the aggregate index under the
// "#"-prefixed JSON-pointer key; the per-kind helpers funnel through here.
func (p *patternAnalysis) addPattern(key, pattern string) {
	p.allPatterns["#"+key] = pattern
}
|
||||||
|
|
||||||
|
// addParameterPattern records a pattern found on a parameter, in both
// the parameter-specific and the aggregate index.
func (p *patternAnalysis) addParameterPattern(key, pattern string) {
	p.parameters["#"+key] = pattern
	p.addPattern(key, pattern)
}
|
||||||
|
|
||||||
|
// addHeaderPattern records a pattern found on a header, in both the
// header-specific and the aggregate index.
func (p *patternAnalysis) addHeaderPattern(key, pattern string) {
	p.headers["#"+key] = pattern
	p.addPattern(key, pattern)
}
|
||||||
|
|
||||||
|
// addItemsPattern records a pattern found on an items definition, in both
// the items-specific and the aggregate index.
func (p *patternAnalysis) addItemsPattern(key, pattern string) {
	p.items["#"+key] = pattern
	p.addPattern(key, pattern)
}
|
||||||
|
|
||||||
|
// addSchemaPattern records a pattern found on a schema, in both the
// schema-specific and the aggregate index.
func (p *patternAnalysis) addSchemaPattern(key, pattern string) {
	p.schemas["#"+key] = pattern
	p.addPattern(key, pattern)
}
|
||||||
|
|
||||||
|
// New takes a swagger spec object and returns an analyzed spec document.
// The analyzed document contains a number of indices that make it easier to
// reason about semantics of a swagger specification for use in code generation
// or validation etc.
func New(doc *spec.Swagger) *Spec {
	// Every index is pre-sized with the same heuristic capacity (150)
	// to limit map growth while the document is analyzed. This literal
	// must be kept in sync with (*Spec).reset.
	a := &Spec{
		spec:        doc,
		consumes:    make(map[string]struct{}, 150),
		produces:    make(map[string]struct{}, 150),
		authSchemes: make(map[string]struct{}, 150),
		operations:  make(map[string]map[string]*spec.Operation, 150),
		allSchemas:  make(map[string]SchemaRef, 150),
		allOfs:      make(map[string]SchemaRef, 150),
		references: referenceAnalysis{
			schemas:        make(map[string]spec.Ref, 150),
			pathItems:      make(map[string]spec.Ref, 150),
			responses:      make(map[string]spec.Ref, 150),
			parameters:     make(map[string]spec.Ref, 150),
			items:          make(map[string]spec.Ref, 150),
			headerItems:    make(map[string]spec.Ref, 150),
			parameterItems: make(map[string]spec.Ref, 150),
			allRefs:        make(map[string]spec.Ref, 150),
		},
		patterns: patternAnalysis{
			parameters:  make(map[string]string, 150),
			headers:     make(map[string]string, 150),
			items:       make(map[string]string, 150),
			schemas:     make(map[string]string, 150),
			allPatterns: make(map[string]string, 150),
		},
	}
	// populate all indices from the supplied document
	a.initialize()
	return a
}
|
||||||
|
|
||||||
|
// Spec takes a swagger spec object and turns it into a registry
// with a bunch of utility methods to act on the information in the spec
type Spec struct {
	spec        *spec.Swagger       // the document being analyzed
	consumes    map[string]struct{} // set of the document's global Consumes mime types (see initialize)
	produces    map[string]struct{} // set of the document's global Produces mime types (see initialize)
	authSchemes map[string]struct{} // set of security scheme names referenced by spec.Security
	// operations is a two-level index of operations; the key order is
	// established in analyzeOperations (not visible here) — confirm there.
	operations map[string]map[string]*spec.Operation
	references referenceAnalysis    // every $ref usage, aggregate and per kind
	patterns   patternAnalysis      // every pattern constraint, aggregate and per kind
	allSchemas map[string]SchemaRef // schemas indexed by JSON pointer
	allOfs     map[string]SchemaRef // schemas participating in allOf constructs
}
|
||||||
|
|
||||||
|
func (s *Spec) reset() {
|
||||||
|
s.consumes = make(map[string]struct{}, 150)
|
||||||
|
s.produces = make(map[string]struct{}, 150)
|
||||||
|
s.authSchemes = make(map[string]struct{}, 150)
|
||||||
|
s.operations = make(map[string]map[string]*spec.Operation, 150)
|
||||||
|
s.allSchemas = make(map[string]SchemaRef, 150)
|
||||||
|
s.allOfs = make(map[string]SchemaRef, 150)
|
||||||
|
s.references.schemas = make(map[string]spec.Ref, 150)
|
||||||
|
s.references.pathItems = make(map[string]spec.Ref, 150)
|
||||||
|
s.references.responses = make(map[string]spec.Ref, 150)
|
||||||
|
s.references.parameters = make(map[string]spec.Ref, 150)
|
||||||
|
s.references.items = make(map[string]spec.Ref, 150)
|
||||||
|
s.references.headerItems = make(map[string]spec.Ref, 150)
|
||||||
|
s.references.parameterItems = make(map[string]spec.Ref, 150)
|
||||||
|
s.references.allRefs = make(map[string]spec.Ref, 150)
|
||||||
|
s.patterns.parameters = make(map[string]string, 150)
|
||||||
|
s.patterns.headers = make(map[string]string, 150)
|
||||||
|
s.patterns.items = make(map[string]string, 150)
|
||||||
|
s.patterns.schemas = make(map[string]string, 150)
|
||||||
|
s.patterns.allPatterns = make(map[string]string, 150)
|
||||||
|
}
|
||||||
|
|
||||||
|
// reload clears all analysis results and re-analyzes the spec, typically
// after the underlying document has been mutated (e.g. expanded or flattened).
func (s *Spec) reload() {
	s.reset()
	s.initialize()
}
|
||||||
|
|
||||||
|
// initialize walks the whole spec document and populates the analyzer's
// indexes: mime types, security schemes, operations, shared parameters,
// shared responses and definitions.
func (s *Spec) initialize() {
	// document-level mime types
	for _, c := range s.spec.Consumes {
		s.consumes[c] = struct{}{}
	}
	for _, c := range s.spec.Produces {
		s.produces[c] = struct{}{}
	}
	// document-level security requirements
	for _, ss := range s.spec.Security {
		for k := range ss {
			s.authSchemes[k] = struct{}{}
		}
	}
	// every operation under every path
	for path, pathItem := range s.AllPaths() {
		s.analyzeOperations(path, &pathItem)
	}

	// shared parameters declared at the document level
	for name, parameter := range s.spec.Parameters {
		refPref := slashpath.Join("/parameters", jsonpointer.Escape(name))
		if parameter.Items != nil {
			s.analyzeItems("items", parameter.Items, refPref, "parameter")
		}
		// only body parameters carry a full schema
		if parameter.In == "body" && parameter.Schema != nil {
			s.analyzeSchema("schema", *parameter.Schema, refPref)
		}
		if parameter.Pattern != "" {
			s.patterns.addParameterPattern(refPref, parameter.Pattern)
		}
	}

	// shared responses declared at the document level
	for name, response := range s.spec.Responses {
		refPref := slashpath.Join("/responses", jsonpointer.Escape(name))
		for k, v := range response.Headers {
			hRefPref := slashpath.Join(refPref, "headers", k)
			if v.Items != nil {
				s.analyzeItems("items", v.Items, hRefPref, "header")
			}
			if v.Pattern != "" {
				s.patterns.addHeaderPattern(hRefPref, v.Pattern)
			}
		}
		if response.Schema != nil {
			s.analyzeSchema("schema", *response.Schema, refPref)
		}
	}

	// definitions
	for name, schema := range s.spec.Definitions {
		s.analyzeSchema(name, schema, "/definitions")
	}
	// TODO: after analyzing all things and flattening schemas etc
	// resolve all the collected references to their final representations
	// best put in a separate method because this could get expensive
}
|
||||||
|
|
||||||
|
func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) {
|
||||||
|
// TODO: resolve refs here?
|
||||||
|
op := pi
|
||||||
|
if pi.Ref.String() != "" {
|
||||||
|
key := slashpath.Join("/paths", jsonpointer.Escape(path))
|
||||||
|
s.references.addPathItemRef(key, pi)
|
||||||
|
}
|
||||||
|
s.analyzeOperation("GET", path, op.Get)
|
||||||
|
s.analyzeOperation("PUT", path, op.Put)
|
||||||
|
s.analyzeOperation("POST", path, op.Post)
|
||||||
|
s.analyzeOperation("PATCH", path, op.Patch)
|
||||||
|
s.analyzeOperation("DELETE", path, op.Delete)
|
||||||
|
s.analyzeOperation("HEAD", path, op.Head)
|
||||||
|
s.analyzeOperation("OPTIONS", path, op.Options)
|
||||||
|
for i, param := range op.Parameters {
|
||||||
|
refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i))
|
||||||
|
if param.Ref.String() != "" {
|
||||||
|
s.references.addParamRef(refPref, ¶m)
|
||||||
|
}
|
||||||
|
if param.Pattern != "" {
|
||||||
|
s.patterns.addParameterPattern(refPref, param.Pattern)
|
||||||
|
}
|
||||||
|
if param.Items != nil {
|
||||||
|
s.analyzeItems("items", param.Items, refPref, "parameter")
|
||||||
|
}
|
||||||
|
if param.Schema != nil {
|
||||||
|
s.analyzeSchema("schema", *param.Schema, refPref)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// analyzeItems records refs and pattern constraints for a simple-schema
// items definition, recursing into nested items first.
func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) {
	if items == nil {
		return
	}
	refURI := slashpath.Join(prefix, name)
	// depth-first: nested items are processed before this level is recorded
	s.analyzeItems(name, items.Items, refURI, location)
	if items.Ref.String() != "" {
		s.references.addItemsRef(refURI, items, location)
	}
	if items.Pattern != "" {
		s.patterns.addItemsPattern(refURI, items.Pattern)
	}
}
|
||||||
|
|
||||||
|
func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) {
|
||||||
|
if op == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range op.Consumes {
|
||||||
|
s.consumes[c] = struct{}{}
|
||||||
|
}
|
||||||
|
for _, c := range op.Produces {
|
||||||
|
s.produces[c] = struct{}{}
|
||||||
|
}
|
||||||
|
for _, ss := range op.Security {
|
||||||
|
for k := range ss {
|
||||||
|
s.authSchemes[k] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ok := s.operations[method]; !ok {
|
||||||
|
s.operations[method] = make(map[string]*spec.Operation)
|
||||||
|
}
|
||||||
|
s.operations[method][path] = op
|
||||||
|
prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method))
|
||||||
|
for i, param := range op.Parameters {
|
||||||
|
refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i))
|
||||||
|
if param.Ref.String() != "" {
|
||||||
|
s.references.addParamRef(refPref, ¶m)
|
||||||
|
}
|
||||||
|
if param.Pattern != "" {
|
||||||
|
s.patterns.addParameterPattern(refPref, param.Pattern)
|
||||||
|
}
|
||||||
|
s.analyzeItems("items", param.Items, refPref, "parameter")
|
||||||
|
if param.In == "body" && param.Schema != nil {
|
||||||
|
s.analyzeSchema("schema", *param.Schema, refPref)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if op.Responses != nil {
|
||||||
|
if op.Responses.Default != nil {
|
||||||
|
refPref := slashpath.Join(prefix, "responses", "default")
|
||||||
|
if op.Responses.Default.Ref.String() != "" {
|
||||||
|
s.references.addResponseRef(refPref, op.Responses.Default)
|
||||||
|
}
|
||||||
|
for k, v := range op.Responses.Default.Headers {
|
||||||
|
hRefPref := slashpath.Join(refPref, "headers", k)
|
||||||
|
s.analyzeItems("items", v.Items, hRefPref, "header")
|
||||||
|
if v.Pattern != "" {
|
||||||
|
s.patterns.addHeaderPattern(hRefPref, v.Pattern)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if op.Responses.Default.Schema != nil {
|
||||||
|
s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for k, res := range op.Responses.StatusCodeResponses {
|
||||||
|
refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k))
|
||||||
|
if res.Ref.String() != "" {
|
||||||
|
s.references.addResponseRef(refPref, &res)
|
||||||
|
}
|
||||||
|
for k, v := range res.Headers {
|
||||||
|
hRefPref := slashpath.Join(refPref, "headers", k)
|
||||||
|
s.analyzeItems("items", v.Items, hRefPref, "header")
|
||||||
|
if v.Pattern != "" {
|
||||||
|
s.patterns.addHeaderPattern(hRefPref, v.Pattern)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if res.Schema != nil {
|
||||||
|
s.analyzeSchema("schema", *res.Schema, refPref)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// analyzeSchema records the schema (taken by value, so each call owns its
// copy) in the allSchemas index under its json pointer, then recursively
// analyzes every nested schema: definitions, properties, patternProperties,
// allOf/anyOf/oneOf, not, additionalProperties, additionalItems and items.
func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) {
	refURI := slashpath.Join(prefix, jsonpointer.Escape(name))
	schRef := SchemaRef{
		Name:     name,
		Schema:   &schema,
		Ref:      spec.MustCreateRef("#" + refURI),
		TopLevel: prefix == "/definitions", // only direct definitions are top-level
	}

	s.allSchemas["#"+refURI] = schRef

	if schema.Ref.String() != "" {
		s.references.addSchemaRef(refURI, schRef)
	}
	if schema.Pattern != "" {
		s.patterns.addSchemaPattern(refURI, schema.Pattern)
	}

	for k, v := range schema.Definitions {
		s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions"))
	}
	for k, v := range schema.Properties {
		s.analyzeSchema(k, v, slashpath.Join(refURI, "properties"))
	}
	for k, v := range schema.PatternProperties {
		s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties"))
	}
	for i, v := range schema.AllOf {
		s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf"))
	}
	// schemas composed with allOf are additionally tracked for flattening
	if len(schema.AllOf) > 0 {
		s.allOfs["#"+refURI] = schRef
	}
	for i, v := range schema.AnyOf {
		s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf"))
	}
	for i, v := range schema.OneOf {
		s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf"))
	}
	if schema.Not != nil {
		s.analyzeSchema("not", *schema.Not, refURI)
	}
	if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
		s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI)
	}
	if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
		s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI)
	}
	if schema.Items != nil {
		// items can be a single schema or a tuple of schemas
		if schema.Items.Schema != nil {
			s.analyzeSchema("items", *schema.Items.Schema, refURI)
		}
		for i, sch := range schema.Items.Schemas {
			s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items"))
		}
	}
}
|
||||||
|
|
||||||
|
// SecurityRequirement is a representation of a security requirement for an operation.
type SecurityRequirement struct {
	Name   string   // name of the security scheme
	Scopes []string // scopes required for that scheme
}
|
||||||
|
|
||||||
|
// SecurityRequirementsFor gets the security requirements for the operation
|
||||||
|
func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) []SecurityRequirement {
|
||||||
|
if s.spec.Security == nil && operation.Security == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
schemes := s.spec.Security
|
||||||
|
if operation.Security != nil {
|
||||||
|
schemes = operation.Security
|
||||||
|
}
|
||||||
|
|
||||||
|
unique := make(map[string]SecurityRequirement)
|
||||||
|
for _, scheme := range schemes {
|
||||||
|
for k, v := range scheme {
|
||||||
|
if _, ok := unique[k]; !ok {
|
||||||
|
unique[k] = SecurityRequirement{Name: k, Scopes: v}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var result []SecurityRequirement
|
||||||
|
for _, v := range unique {
|
||||||
|
result = append(result, v)
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// SecurityDefinitionsFor gets the matching security definitions for a set of requirements
|
||||||
|
func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme {
|
||||||
|
requirements := s.SecurityRequirementsFor(operation)
|
||||||
|
if len(requirements) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
result := make(map[string]spec.SecurityScheme)
|
||||||
|
for _, v := range requirements {
|
||||||
|
if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok {
|
||||||
|
if definition != nil {
|
||||||
|
result[v.Name] = *definition
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumesFor gets the mediatypes for the operation
|
||||||
|
func (s *Spec) ConsumesFor(operation *spec.Operation) []string {
|
||||||
|
|
||||||
|
if len(operation.Consumes) == 0 {
|
||||||
|
cons := make(map[string]struct{}, len(s.spec.Consumes))
|
||||||
|
for _, k := range s.spec.Consumes {
|
||||||
|
cons[k] = struct{}{}
|
||||||
|
}
|
||||||
|
return s.structMapKeys(cons)
|
||||||
|
}
|
||||||
|
|
||||||
|
cons := make(map[string]struct{}, len(operation.Consumes))
|
||||||
|
for _, c := range operation.Consumes {
|
||||||
|
cons[c] = struct{}{}
|
||||||
|
}
|
||||||
|
return s.structMapKeys(cons)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProducesFor gets the mediatypes for the operation
|
||||||
|
func (s *Spec) ProducesFor(operation *spec.Operation) []string {
|
||||||
|
if len(operation.Produces) == 0 {
|
||||||
|
prod := make(map[string]struct{}, len(s.spec.Produces))
|
||||||
|
for _, k := range s.spec.Produces {
|
||||||
|
prod[k] = struct{}{}
|
||||||
|
}
|
||||||
|
return s.structMapKeys(prod)
|
||||||
|
}
|
||||||
|
|
||||||
|
prod := make(map[string]struct{}, len(operation.Produces))
|
||||||
|
for _, c := range operation.Produces {
|
||||||
|
prod[c] = struct{}{}
|
||||||
|
}
|
||||||
|
return s.structMapKeys(prod)
|
||||||
|
}
|
||||||
|
|
||||||
|
func mapKeyFromParam(param *spec.Parameter) string {
|
||||||
|
return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param))
|
||||||
|
}
|
||||||
|
|
||||||
|
func fieldNameFromParam(param *spec.Parameter) string {
|
||||||
|
if nm, ok := param.Extensions.GetString("go-name"); ok {
|
||||||
|
return nm
|
||||||
|
}
|
||||||
|
return swag.ToGoName(param.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// paramsAsMap resolves any $ref parameters against the root document and
// stores each parameter in res keyed by "in#fieldName", so later entries
// override earlier ones with the same key.
//
// NOTE(review): a failing ref resolution panics; callers must be prepared
// for that.
func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter) {
	for _, param := range parameters {
		pr := param
		if pr.Ref.String() != "" {
			// resolve the reference against the root spec document
			obj, _, err := pr.Ref.GetPointer().Get(s.spec)
			if err != nil {
				panic(err)
			}
			pr = obj.(spec.Parameter)
		}
		res[mapKeyFromParam(&pr)] = pr
	}
}
|
||||||
|
|
||||||
|
// ParametersFor the specified operation id
|
||||||
|
func (s *Spec) ParametersFor(operationID string) []spec.Parameter {
|
||||||
|
gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter {
|
||||||
|
bag := make(map[string]spec.Parameter)
|
||||||
|
s.paramsAsMap(pi.Parameters, bag)
|
||||||
|
s.paramsAsMap(op.Parameters, bag)
|
||||||
|
|
||||||
|
var res []spec.Parameter
|
||||||
|
for _, v := range bag {
|
||||||
|
res = append(res, v)
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
for _, pi := range s.spec.Paths.Paths {
|
||||||
|
if pi.Get != nil && pi.Get.ID == operationID {
|
||||||
|
return gatherParams(&pi, pi.Get)
|
||||||
|
}
|
||||||
|
if pi.Head != nil && pi.Head.ID == operationID {
|
||||||
|
return gatherParams(&pi, pi.Head)
|
||||||
|
}
|
||||||
|
if pi.Options != nil && pi.Options.ID == operationID {
|
||||||
|
return gatherParams(&pi, pi.Options)
|
||||||
|
}
|
||||||
|
if pi.Post != nil && pi.Post.ID == operationID {
|
||||||
|
return gatherParams(&pi, pi.Post)
|
||||||
|
}
|
||||||
|
if pi.Patch != nil && pi.Patch.ID == operationID {
|
||||||
|
return gatherParams(&pi, pi.Patch)
|
||||||
|
}
|
||||||
|
if pi.Put != nil && pi.Put.ID == operationID {
|
||||||
|
return gatherParams(&pi, pi.Put)
|
||||||
|
}
|
||||||
|
if pi.Delete != nil && pi.Delete.ID == operationID {
|
||||||
|
return gatherParams(&pi, pi.Delete)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that
// apply for the method and path. Unknown method/path combinations yield an
// empty map instead of panicking on a missing operation.
func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter {
	res := make(map[string]spec.Parameter)
	// AllPaths guards against a nil spec/paths
	if pi, ok := s.AllPaths()[path]; ok {
		s.paramsAsMap(pi.Parameters, res)
		// guard: the operation may not be indexed for this method/path
		if op, ok := s.operations[strings.ToUpper(method)][path]; ok && op != nil {
			s.paramsAsMap(op.Parameters, res)
		}
	}
	return res
}
|
||||||
|
|
||||||
|
// OperationForName gets the operation for the given id
|
||||||
|
func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) {
|
||||||
|
for method, pathItem := range s.operations {
|
||||||
|
for path, op := range pathItem {
|
||||||
|
if operationID == op.ID {
|
||||||
|
return method, path, op, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", "", nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// OperationFor the given method and path.
func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) {
	// indexing a missing (nil) inner map safely yields the zero values
	op, ok := s.operations[strings.ToUpper(method)][path]
	return op, ok
}
|
||||||
|
|
||||||
|
// Operations gathers all the operations specified in the spec document,
// indexed by HTTP method and then by path. The internal map is returned
// directly (not a copy).
func (s *Spec) Operations() map[string]map[string]*spec.Operation {
	return s.operations
}
|
||||||
|
|
||||||
|
func (s *Spec) structMapKeys(mp map[string]struct{}) []string {
|
||||||
|
if len(mp) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
result := make([]string, 0, len(mp))
|
||||||
|
for k := range mp {
|
||||||
|
result = append(result, k)
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllPaths returns all the paths in the swagger spec
|
||||||
|
func (s *Spec) AllPaths() map[string]spec.PathItem {
|
||||||
|
if s.spec == nil || s.spec.Paths == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return s.spec.Paths.Paths
|
||||||
|
}
|
||||||
|
|
||||||
|
// OperationIDs gets all the operation ids; operations without an explicit id
// are represented as "METHOD path".
func (s *Spec) OperationIDs() []string {
	if len(s.operations) == 0 {
		return nil
	}
	result := make([]string, 0, len(s.operations))
	for method, pathOps := range s.operations {
		for pth, op := range pathOps {
			if op.ID != "" {
				result = append(result, op.ID)
				continue
			}
			result = append(result, strings.ToUpper(method)+" "+pth)
		}
	}
	return result
}
|
||||||
|
|
||||||
|
// OperationMethodPaths gets a "METHOD path" string for every operation.
func (s *Spec) OperationMethodPaths() []string {
	if len(s.operations) == 0 {
		return nil
	}
	result := make([]string, 0, len(s.operations))
	for method, pathOps := range s.operations {
		for pth := range pathOps {
			result = append(result, strings.ToUpper(method)+" "+pth)
		}
	}
	return result
}
|
||||||
|
|
||||||
|
// RequiredConsumes gets all the distinct consumes that are specified in the
// specification document (document level and operation level combined).
func (s *Spec) RequiredConsumes() []string {
	return s.structMapKeys(s.consumes)
}
|
||||||
|
|
||||||
|
// RequiredProduces gets all the distinct produces that are specified in the
// specification document (document level and operation level combined).
func (s *Spec) RequiredProduces() []string {
	return s.structMapKeys(s.produces)
}
|
||||||
|
|
||||||
|
// RequiredSecuritySchemes gets all the distinct security scheme names that
// are referenced in the swagger spec (document level and operation level).
func (s *Spec) RequiredSecuritySchemes() []string {
	return s.structMapKeys(s.authSchemes)
}
|
||||||
|
|
||||||
|
// SchemaRef is a reference to a schema.
type SchemaRef struct {
	Name     string       // name under which the schema was found (definition, property, index, ...)
	Ref      spec.Ref     // json pointer ref to the schema's location in the document
	Schema   *spec.Schema // the schema itself
	TopLevel bool         // true when the schema sits directly under /definitions
}
|
||||||
|
|
||||||
|
// SchemasWithAllOf returns schema references to all schemas that are defined
|
||||||
|
// with an allOf key
|
||||||
|
func (s *Spec) SchemasWithAllOf() (result []SchemaRef) {
|
||||||
|
for _, v := range s.allOfs {
|
||||||
|
result = append(result, v)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllDefinitions returns schema references for all the definitions that were discovered
|
||||||
|
func (s *Spec) AllDefinitions() (result []SchemaRef) {
|
||||||
|
for _, v := range s.allSchemas {
|
||||||
|
result = append(result, v)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllDefinitionReferences returns json refs for all the discovered schemas
|
||||||
|
func (s *Spec) AllDefinitionReferences() (result []string) {
|
||||||
|
for _, v := range s.references.schemas {
|
||||||
|
result = append(result, v.String())
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllParameterReferences returns json refs for all the discovered parameters
|
||||||
|
func (s *Spec) AllParameterReferences() (result []string) {
|
||||||
|
for _, v := range s.references.parameters {
|
||||||
|
result = append(result, v.String())
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllResponseReferences returns json refs for all the discovered responses
|
||||||
|
func (s *Spec) AllResponseReferences() (result []string) {
|
||||||
|
for _, v := range s.references.responses {
|
||||||
|
result = append(result, v.String())
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllPathItemReferences returns the references for all the items
|
||||||
|
func (s *Spec) AllPathItemReferences() (result []string) {
|
||||||
|
for _, v := range s.references.pathItems {
|
||||||
|
result = append(result, v.String())
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllItemsReferences returns the references for all the items
|
||||||
|
func (s *Spec) AllItemsReferences() (result []string) {
|
||||||
|
for _, v := range s.references.items {
|
||||||
|
result = append(result, v.String())
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllReferences returns all the references found in the document
|
||||||
|
func (s *Spec) AllReferences() (result []string) {
|
||||||
|
for _, v := range s.references.allRefs {
|
||||||
|
result = append(result, v.String())
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllRefs returns all the unique references found in the document
|
||||||
|
func (s *Spec) AllRefs() (result []spec.Ref) {
|
||||||
|
set := make(map[string]struct{})
|
||||||
|
for _, v := range s.references.allRefs {
|
||||||
|
a := v.String()
|
||||||
|
if a == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok := set[a]; !ok {
|
||||||
|
set[a] = struct{}{}
|
||||||
|
result = append(result, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// cloneStringMap returns a shallow copy of source so callers cannot mutate
// the analyzer's internal state through the returned map.
func cloneStringMap(source map[string]string) map[string]string {
	clone := make(map[string]string, len(source))
	for key, value := range source {
		clone[key] = value
	}
	return clone
}
|
||||||
|
|
||||||
|
// ParameterPatterns returns all the patterns found in parameters.
// The map is cloned to avoid accidental changes to internal state.
func (s *Spec) ParameterPatterns() map[string]string {
	return cloneStringMap(s.patterns.parameters)
}
|
||||||
|
|
||||||
|
// HeaderPatterns returns all the patterns found in response headers.
// The map is cloned to avoid accidental changes to internal state.
func (s *Spec) HeaderPatterns() map[string]string {
	return cloneStringMap(s.patterns.headers)
}
|
||||||
|
|
||||||
|
// ItemsPatterns returns all the patterns found in simple array items.
// The map is cloned to avoid accidental changes to internal state.
func (s *Spec) ItemsPatterns() map[string]string {
	return cloneStringMap(s.patterns.items)
}
|
||||||
|
|
||||||
|
// SchemaPatterns returns all the patterns found in schemas.
// The map is cloned to avoid accidental changes to internal state.
func (s *Spec) SchemaPatterns() map[string]string {
	return cloneStringMap(s.patterns.schemas)
}
|
||||||
|
|
||||||
|
// AllPatterns returns all the patterns found anywhere in the spec.
// The map is cloned to avoid accidental changes to internal state.
func (s *Spec) AllPatterns() map[string]string {
	return cloneStringMap(s.patterns.allPatterns)
}
|
|
@ -0,0 +1,756 @@
|
||||||
|
package analysis
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"path"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/go-openapi/jsonpointer"
|
||||||
|
swspec "github.com/go-openapi/spec"
|
||||||
|
"github.com/go-openapi/swag"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FlattenOpts configuration for flattening a swagger specification.
type FlattenOpts struct {
	Spec     *Spec  // the analyzed spec to flatten
	BasePath string // passed to the expander as the relative base for resolving references

	_ struct{} // require keys
}
|
||||||
|
|
||||||
|
// ExpandOpts creates a spec.ExpandOptions to configure expanding a
// specification document, using BasePath as the relative base.
func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *swspec.ExpandOptions {
	return &swspec.ExpandOptions{RelativeBase: f.BasePath, SkipSchemas: skipSchemas}
}
|
||||||
|
|
||||||
|
// Swagger gets the swagger specification for this flatten operation.
func (f *FlattenOpts) Swagger() *swspec.Swagger {
	return f.Spec.spec
}
|
||||||
|
|
||||||
|
// Flatten an analyzed spec.
|
||||||
|
//
|
||||||
|
// To flatten a spec means:
|
||||||
|
//
|
||||||
|
// Expand the parameters, responses, path items, parameter items and header items.
|
||||||
|
// Import external (http, file) references so they become internal to the document.
|
||||||
|
// Move every inline schema to be a definition with an auto-generated name in a depth-first fashion.
|
||||||
|
// Rewritten schemas get a vendor extension x-go-gen-location so we know in which package they need to be rendered.
|
||||||
|
func Flatten(opts FlattenOpts) error {
|
||||||
|
// recursively expand responses, parameters, path items and items
|
||||||
|
err := swspec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(true))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
opts.Spec.reload() // re-analyze
|
||||||
|
|
||||||
|
// at this point there are no other references left but schemas
|
||||||
|
if err := importExternalReferences(&opts); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
opts.Spec.reload() // re-analyze
|
||||||
|
|
||||||
|
// rewrite the inline schemas (schemas that aren't simple types or arrays of simple types)
|
||||||
|
if err := nameInlinedSchemas(&opts); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
opts.Spec.reload() // re-analyze
|
||||||
|
|
||||||
|
// TODO: simplifiy known schema patterns to flat objects with properties?
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// nameInlinedSchemas assigns a generated definition name to every inline
// (non-$ref, non-top-level, non-simple) schema in the spec, walking schemas
// depth-first so nested schemas are handled before their parents.
func nameInlinedSchemas(opts *FlattenOpts) error {
	namer := &inlineSchemaNamer{Spec: opts.Swagger(), Operations: opRefsByRef(gatherOperations(opts.Spec, nil))}
	depthFirst := sortDepthFirst(opts.Spec.allSchemas)

	for _, key := range depthFirst {
		sch := opts.Spec.allSchemas[key]
		if sch.Schema != nil && sch.Schema.Ref.String() == "" && !sch.TopLevel { // inline schema
			asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath})
			if err != nil {
				return fmt.Errorf("schema analysis [%s]: %v", sch.Ref.String(), err)
			}

			if !asch.IsSimpleSchema { // complex schemas get moved
				if err := namer.Name(key, sch.Schema, asch); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// depthGroupOrder fixes the order in which sortDepthFirst emits its groups:
// operation-related schemas first, plain definitions last.
var depthGroupOrder = []string{"sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition"}
|
||||||
|
|
||||||
|
// sortDepthFirst orders schema keys by category (per depthGroupOrder) and,
// within each category, by descending key depth then lexical key order.
func sortDepthFirst(data map[string]SchemaRef) (sorted []string) {
	// group by category (shared params, op param, statuscode response, default response, definitions)
	// sort groups internally by number of parts in the key and lexical names
	// flatten groups into a single list of keys
	grouped := make(map[string]keys, len(data))
	for k := range data {
		split := keyParts(k)
		var pk string
		if split.IsSharedOperationParam() {
			pk = "sharedOpParam"
		}
		if split.IsOperationParam() {
			pk = "opParam"
		}
		if split.IsStatusCodeResponse() {
			pk = "codeResponse"
		}
		if split.IsDefaultResponse() {
			pk = "defaultResponse"
		}
		if split.IsDefinition() {
			pk = "definition"
		}
		// NOTE(review): keys matching none of the categories fall into the
		// empty group, which depthGroupOrder never emits — confirm such keys
		// cannot occur or are intentionally dropped.
		grouped[pk] = append(grouped[pk], key{len(split), k})
	}

	for _, pk := range depthGroupOrder {
		res := grouped[pk]
		sort.Sort(res)
		for _, v := range res {
			sorted = append(sorted, v.Key)
		}
	}

	return
}
|
||||||
|
|
||||||
|
type key struct {
|
||||||
|
Segments int
|
||||||
|
Key string
|
||||||
|
}
|
||||||
|
type keys []key
|
||||||
|
|
||||||
|
func (k keys) Len() int { return len(k) }
|
||||||
|
func (k keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] }
|
||||||
|
func (k keys) Less(i, j int) bool {
|
||||||
|
return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// inlineSchemaNamer derives definition names for inline schemas, using the
// operations they belong to for naming context.
type inlineSchemaNamer struct {
	Spec       *swspec.Swagger  // the document being rewritten
	Operations map[string]opRef // operations indexed by ref string (see opRefsByRef)
}
|
||||||
|
|
||||||
|
func opRefsByRef(oprefs map[string]opRef) map[string]opRef {
|
||||||
|
result := make(map[string]opRef, len(oprefs))
|
||||||
|
for _, v := range oprefs {
|
||||||
|
result[v.Ref.String()] = v
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name moves the inline schema found at key into the spec's definitions
// under a unique generated name and rewrites the original location into a
// $ref pointing at the new definition. Candidate names come from the key's
// structure and the owning operations; empty candidates are skipped.
func (isn *inlineSchemaNamer) Name(key string, schema *swspec.Schema, aschema *AnalyzedSchema) error {
	if swspec.Debug {
		log.Printf("naming inlined schema at %s", key)
	}

	parts := keyParts(key)
	for _, name := range namesFromKey(parts, aschema, isn.Operations) {
		if name != "" {
			// create unique name
			newName := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name))

			// clone schema
			sch, err := cloneSchema(schema)
			if err != nil {
				return err
			}

			// replace values on schema
			if err := rewriteSchemaToRef(isn.Spec, key, swspec.MustCreateRef("#/definitions/"+newName)); err != nil {
				return fmt.Errorf("name inlined schema: %v", err)
			}

			// record where the generated type should be rendered
			sch.AddExtension("x-go-gen-location", genLocation(parts))
			// fmt.Printf("{\n  %q,\n  \"\",\n  spec.MustCreateRef(%q),\n  \"\",\n},\n", key, "#/definitions/"+newName)
			// save cloned schema to definitions
			saveSchema(isn.Spec, newName, sch)
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
func genLocation(parts splitKey) string {
|
||||||
|
if parts.IsOperation() {
|
||||||
|
return "operations"
|
||||||
|
}
|
||||||
|
if parts.IsDefinition() {
|
||||||
|
return "models"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func uniqifyName(definitions swspec.Definitions, name string) string {
|
||||||
|
if name == "" {
|
||||||
|
name = "oaiGen"
|
||||||
|
}
|
||||||
|
if len(definitions) == 0 {
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := definitions[name]; !ok {
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
name += "OAIGen"
|
||||||
|
var idx int
|
||||||
|
unique := name
|
||||||
|
_, known := definitions[unique]
|
||||||
|
for known {
|
||||||
|
idx++
|
||||||
|
unique = fmt.Sprintf("%s%d", name, idx)
|
||||||
|
_, known = definitions[unique]
|
||||||
|
}
|
||||||
|
return unique
|
||||||
|
}
|
||||||
|
|
||||||
|
// namesFromKey derives candidate definition names for the schema located
// at the given pointer. Base names come from the owning operation's ID
// (for body params and responses) or from the definition name; BuildName
// then appends the remaining pointer segments. The result is sorted for
// deterministic output.
func namesFromKey(parts splitKey, aschema *AnalyzedSchema, operations map[string]opRef) []string {
	var baseNames [][]string
	// startIndex is the first pointer segment NOT consumed by the base name.
	var startIndex int
	if parts.IsOperation() {
		// params
		if parts.IsOperationParam() || parts.IsSharedOperationParam() {
			piref := parts.PathItemRef()
			if piref.String() != "" && parts.IsOperationParam() {
				if op, ok := operations[piref.String()]; ok {
					startIndex = 5
					baseNames = append(baseNames, []string{op.ID, "params", "body"})
				}
			} else if parts.IsSharedOperationParam() {
				pref := parts.PathRef()
				// a shared param applies to every operation under the path,
				// so one candidate name per matching operation is produced
				for k, v := range operations {
					if strings.HasPrefix(k, pref.String()) {
						startIndex = 4
						baseNames = append(baseNames, []string{v.ID, "params", "body"})
					}
				}
			}
		}
		// responses
		if parts.IsOperationResponse() {
			piref := parts.PathItemRef()
			if piref.String() != "" {
				if op, ok := operations[piref.String()]; ok {
					startIndex = 6
					baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"})
				}
			}
		}
	}

	// definitions
	if parts.IsDefinition() {
		nm := parts.DefinitionName()
		if nm != "" {
			startIndex = 2
			baseNames = append(baseNames, []string{parts.DefinitionName()})
		}
	}

	var result []string
	for _, segments := range baseNames {
		nm := parts.BuildName(segments, startIndex, aschema)
		if nm != "" {
			result = append(result, nm)
		}
	}
	sort.Strings(result)
	return result
}
|
||||||
|
|
||||||
|
// Well-known swagger document section names used when interpreting split
// JSON pointers.
const (
	pths        = "paths"
	responses   = "responses"
	parameters  = "parameters"
	definitions = "definitions"
)

// ignoredKeys lists JSON pointer segments that carry no naming information
// and are skipped when building a name for an inlined schema (see
// splitKey.BuildName). Initialized with a composite literal instead of an
// init() function: the data is static, so the func indirection was
// unnecessary (Go best practice: avoid init() for simple initialization).
var ignoredKeys = map[string]struct{}{
	"schema":     {},
	"properties": {},
	"not":        {},
	"anyOf":      {},
	"oneOf":      {},
}
|
||||||
|
|
||||||
|
// splitKey is a JSON pointer split into its unescaped segments.
type splitKey []string

// IsDefinition reports whether the pointer addresses something under
// "definitions".
func (s splitKey) IsDefinition() bool {
	return len(s) > 1 && s[0] == definitions
}

// DefinitionName returns the definition's name segment, or "" when the
// pointer is not under "definitions".
func (s splitKey) DefinitionName() string {
	if !s.IsDefinition() {
		return ""
	}
	return s[1]
}

// BuildName assembles a readable name from the base segments plus the
// pointer segments after startIndex: structural keys in ignoredKeys are
// dropped, and items/additionalItems map to "tuple" or "items" depending
// on the analyzed schema. Segments are joined with spaces.
func (s splitKey) BuildName(segments []string, startIndex int, aschema *AnalyzedSchema) string {
	for _, part := range s[startIndex:] {
		if _, ignored := ignoredKeys[part]; !ignored {
			if part == "items" || part == "additionalItems" {
				if aschema.IsTuple || aschema.IsTupleWithExtra {
					segments = append(segments, "tuple")
				} else {
					segments = append(segments, "items")
				}
				// additionalItems keeps its own marker after tuple/items
				if part == "additionalItems" {
					segments = append(segments, part)
				}
				continue
			}
			segments = append(segments, part)
		}
	}
	return strings.Join(segments, " ")
}

// IsOperation reports whether the pointer addresses something under "paths".
func (s splitKey) IsOperation() bool {
	return len(s) > 1 && s[0] == pths
}

// IsSharedOperationParam reports whether the pointer addresses a parameter
// shared by all operations of a path (paths/<path>/parameters).
func (s splitKey) IsSharedOperationParam() bool {
	return len(s) > 2 && s[0] == pths && s[2] == parameters
}

// IsOperationParam reports whether the pointer addresses a parameter of a
// single operation (paths/<path>/<method>/parameters).
func (s splitKey) IsOperationParam() bool {
	return len(s) > 3 && s[0] == pths && s[3] == parameters
}

// IsOperationResponse reports whether the pointer addresses a response of
// a single operation (paths/<path>/<method>/responses).
func (s splitKey) IsOperationResponse() bool {
	return len(s) > 3 && s[0] == pths && s[3] == responses
}

// IsDefaultResponse reports whether the pointer addresses an operation's
// "default" response.
func (s splitKey) IsDefaultResponse() bool {
	return len(s) > 4 && s[0] == pths && s[3] == responses && s[4] == "default"
}

// IsStatusCodeResponse reports whether the pointer addresses a response
// keyed by a numeric status code. The len(s) > 4 check short-circuits
// before isInt indexes s[4].
func (s splitKey) IsStatusCodeResponse() bool {
	isInt := func() bool {
		_, err := strconv.Atoi(s[4])
		return err == nil
	}
	return len(s) > 4 && s[0] == pths && s[3] == responses && isInt()
}

// ResponseName names the addressed response: the HTTP status text for a
// status-code response, "Default" for the default response, "" otherwise.
func (s splitKey) ResponseName() string {
	if s.IsStatusCodeResponse() {
		code, _ := strconv.Atoi(s[4])
		return http.StatusText(code)
	}
	if s.IsDefaultResponse() {
		return "Default"
	}
	return ""
}
|
||||||
|
|
||||||
|
// validMethods is the set of upper-cased HTTP methods recognized as
// operation keys inside a path item (see splitKey.PathItemRef).
// Initialized with a composite literal instead of an init() function: the
// data is static, so the func indirection was unnecessary (Go best
// practice: avoid init() for simple initialization).
var validMethods = map[string]struct{}{
	"GET":     {},
	"HEAD":    {},
	"OPTIONS": {},
	"PATCH":   {},
	"POST":    {},
	"PUT":     {},
	"DELETE":  {},
}
|
||||||
|
|
||||||
|
// PathItemRef builds a ref to the operation addressed by the pointer
// ("#/paths/<escaped path>/<METHOD>"). It returns the zero ref when the
// pointer has fewer than three segments or when the method segment is
// neither a known HTTP method nor an "x-" vendor extension.
func (s splitKey) PathItemRef() swspec.Ref {
	if len(s) < 3 {
		return swspec.Ref{}
	}
	pth, method := s[1], s[2]
	if _, validMethod := validMethods[strings.ToUpper(method)]; !validMethod && !strings.HasPrefix(method, "x-") {
		return swspec.Ref{}
	}
	return swspec.MustCreateRef("#" + path.Join("/", pths, jsonpointer.Escape(pth), strings.ToUpper(method)))
}

// PathRef builds a ref to the path item addressed by the pointer
// ("#/paths/<escaped path>"), or the zero ref if the pointer is not under
// "paths".
func (s splitKey) PathRef() swspec.Ref {
	if !s.IsOperation() {
		return swspec.Ref{}
	}
	return swspec.MustCreateRef("#" + path.Join("/", pths, jsonpointer.Escape(s[1])))
}
|
||||||
|
|
||||||
|
func keyParts(key string) splitKey {
|
||||||
|
var res []string
|
||||||
|
for _, part := range strings.Split(key[1:], "/") {
|
||||||
|
if part != "" {
|
||||||
|
res = append(res, jsonpointer.Unescape(part))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
// rewriteSchemaToRef replaces the schema at the pointer key with a pure
// $ref schema pointing at ref. Values the pointer returns by value or as
// a bare *Schema are rewritten through their parent container via
// rewriteParentRef, since mutating the returned value would not stick.
func rewriteSchemaToRef(spec *swspec.Swagger, key string, ref swspec.Ref) error {
	if swspec.Debug {
		log.Printf("rewriting schema to ref for %s with %s", key, ref.String())
	}
	// strip the leading "#" to obtain a plain JSON pointer
	pth := key[1:]
	ptr, err := jsonpointer.New(pth)
	if err != nil {
		return err
	}

	value, _, err := ptr.Get(spec)
	if err != nil {
		return err
	}

	switch refable := value.(type) {
	case *swspec.Schema:
		return rewriteParentRef(spec, key, ref)
	case *swspec.SchemaOrBool:
		if refable.Schema != nil {
			refable.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
		}
	case *swspec.SchemaOrArray:
		if refable.Schema != nil {
			refable.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
		}
	case swspec.Schema:
		return rewriteParentRef(spec, key, ref)
	default:
		return fmt.Errorf("no schema with ref found at %s for %T", key, value)
	}

	return nil
}
|
||||||
|
|
||||||
|
// rewriteParentRef replaces the schema at key with a $ref schema by
// mutating the parent container (responses map, parameter list, schema
// slice, ...) that holds it. This is required when the schema itself was
// obtained by value and cannot be mutated in place.
func rewriteParentRef(spec *swspec.Swagger, key string, ref swspec.Ref) error {
	// strip the "#", then split into the parent pointer and the entry name
	pth := key[1:]
	parent, entry := path.Dir(pth), path.Base(pth)
	if swspec.Debug {
		log.Println("getting schema holder at:", parent)
	}

	pptr, err := jsonpointer.New(parent)
	if err != nil {
		return err
	}
	pvalue, _, err := pptr.Get(spec)
	if err != nil {
		return fmt.Errorf("can't get parent for %s: %v", parent, err)
	}
	if swspec.Debug {
		log.Printf("rewriting holder for %T", pvalue)
	}

	switch container := pvalue.(type) {
	case swspec.Response:
		// response obtained by value: recurse one level up to mutate its holder
		if err := rewriteParentRef(spec, "#"+parent, ref); err != nil {
			return err
		}

	case *swspec.Response:
		container.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}

	case *swspec.Responses:
		statusCode, err := strconv.Atoi(entry)
		if err != nil {
			return fmt.Errorf("%s not a number: %v", pth, err)
		}
		// map values are copies: read, modify, write back
		resp := container.StatusCodeResponses[statusCode]
		resp.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
		container.StatusCodeResponses[statusCode] = resp

	case map[string]swspec.Response:
		resp := container[entry]
		resp.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
		container[entry] = resp

	case swspec.Parameter:
		// parameter obtained by value: recurse one level up to mutate its holder
		if err := rewriteParentRef(spec, "#"+parent, ref); err != nil {
			return err
		}

	case map[string]swspec.Parameter:
		param := container[entry]
		param.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
		container[entry] = param

	case []swspec.Parameter:
		idx, err := strconv.Atoi(entry)
		if err != nil {
			return fmt.Errorf("%s not a number: %v", pth, err)
		}
		param := container[idx]
		param.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
		container[idx] = param

	case swspec.Definitions:
		container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}

	case map[string]swspec.Schema:
		container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}

	case []swspec.Schema:
		idx, err := strconv.Atoi(entry)
		if err != nil {
			return fmt.Errorf("%s not a number: %v", pth, err)
		}
		container[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}

	case *swspec.SchemaOrArray:
		// tuple position: entry is the index into Schemas
		idx, err := strconv.Atoi(entry)
		if err != nil {
			return fmt.Errorf("%s not a number: %v", pth, err)
		}
		container.Schemas[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
	default:
		return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue)
	}
	return nil
}
|
||||||
|
|
||||||
|
// cloneSchema deep-copies a schema by round-tripping it through its
// dynamic JSON representation.
func cloneSchema(schema *swspec.Schema) (*swspec.Schema, error) {
	var sch swspec.Schema
	if err := swag.FromDynamicJSON(schema, &sch); err != nil {
		return nil, fmt.Errorf("name inlined schema: %v", err)
	}
	return &sch, nil
}
|
||||||
|
|
||||||
|
// importExternalReferences resolves every schema $ref that points outside
// the current document, stores each resolved schema under a unique name in
// the definitions, and rewrites all referring keys to the new local ref.
func importExternalReferences(opts *FlattenOpts) error {
	groupedRefs := reverseIndexForSchemaRefs(opts)

	for refStr, entry := range groupedRefs {
		// refs with only a fragment are already local to this document
		if !entry.Ref.HasFragmentOnly {
			if swspec.Debug {
				log.Printf("importing external schema for [%s] from %s", strings.Join(entry.Keys, ", "), refStr)
			}
			// resolve to actual schema
			sch, err := swspec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false))
			if err != nil {
				return err
			}
			if sch == nil {
				return fmt.Errorf("no schema found at %s for [%s]", refStr, strings.Join(entry.Keys, ", "))
			}
			// NOTE(review): this repeats the identical debug message logged above.
			if swspec.Debug {
				log.Printf("importing external schema for [%s] from %s", strings.Join(entry.Keys, ", "), refStr)
			}

			// generate a unique name
			newName := uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref))
			if swspec.Debug {
				log.Printf("new name for [%s]: %s", strings.Join(entry.Keys, ", "), newName)
			}

			// rewrite the external refs to local ones
			for _, key := range entry.Keys {
				if err := updateRef(opts.Swagger(), key, swspec.MustCreateRef("#"+path.Join("/definitions", newName))); err != nil {
					return err
				}
			}

			// add the resolved schema to the definitions
			saveSchema(opts.Swagger(), newName, sch)
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// refRevIdx is a reverse-index entry: one Ref plus every document key that
// references it.
type refRevIdx struct {
	Ref  swspec.Ref
	Keys []string
}

// reverseIndexForSchemaRefs groups the analyzed spec's schema refs by
// their serialized ref string, collecting all referring keys per ref.
func reverseIndexForSchemaRefs(opts *FlattenOpts) map[string]refRevIdx {
	collected := make(map[string]refRevIdx)
	for key, schRef := range opts.Spec.references.schemas {
		if entry, ok := collected[schRef.String()]; ok {
			// map values are copies: append, then write back
			entry.Keys = append(entry.Keys, key)
			collected[schRef.String()] = entry
		} else {
			collected[schRef.String()] = refRevIdx{
				Ref:  schRef,
				Keys: []string{key},
			}
		}
	}
	return collected
}
|
||||||
|
|
||||||
|
// nameFromRef derives a JSON-style name for a ref: the last fragment
// segment when a fragment is present, else the URL path's base name with
// any extension stripped, else the host with dots replaced by spaces.
func nameFromRef(ref swspec.Ref) string {
	u := ref.GetURL()
	if u.Fragment != "" {
		return swag.ToJSONName(path.Base(u.Fragment))
	}
	if u.Path != "" {
		bn := path.Base(u.Path)
		if bn != "" && bn != "/" {
			ext := path.Ext(bn)
			if ext != "" {
				return swag.ToJSONName(bn[:len(bn)-len(ext)])
			}
			return swag.ToJSONName(bn)
		}
	}
	return swag.ToJSONName(strings.Replace(u.Host, ".", " ", -1))
}
|
||||||
|
|
||||||
|
// saveSchema stores schema under name in the spec's definitions, lazily
// allocating the definitions map. A nil schema is ignored.
func saveSchema(spec *swspec.Swagger, name string, schema *swspec.Schema) {
	if schema == nil {
		return
	}
	if spec.Definitions == nil {
		// 150: size hint to limit map growth; presumably a typical
		// definition count — no stronger rationale visible here
		spec.Definitions = make(map[string]swspec.Schema, 150)
	}
	spec.Definitions[name] = *schema
}
|
||||||
|
|
||||||
|
// updateRef points the $ref of the schema at the pointer key to ref.
// Schemas returned by value (plain swspec.Schema) are replaced through
// their parent container, mirroring the cases in rewriteParentRef.
func updateRef(spec *swspec.Swagger, key string, ref swspec.Ref) error {
	if swspec.Debug {
		log.Printf("updating ref for %s with %s", key, ref.String())
	}
	// strip the leading "#" to obtain a plain JSON pointer
	pth := key[1:]
	ptr, err := jsonpointer.New(pth)
	if err != nil {
		return err
	}

	value, _, err := ptr.Get(spec)
	if err != nil {
		return err
	}

	switch refable := value.(type) {
	case *swspec.Schema:
		refable.Ref = ref
	case *swspec.SchemaOrBool:
		if refable.Schema != nil {
			refable.Schema.Ref = ref
		}
	case *swspec.SchemaOrArray:
		if refable.Schema != nil {
			refable.Schema.Ref = ref
		}
	case swspec.Schema:
		// schema came back by value: mutate the container that holds it
		parent, entry := path.Dir(pth), path.Base(pth)
		if swspec.Debug {
			log.Println("getting schema holder at:", parent)
		}

		pptr, err := jsonpointer.New(parent)
		if err != nil {
			return err
		}
		pvalue, _, err := pptr.Get(spec)
		if err != nil {
			return fmt.Errorf("can't get parent for %s: %v", parent, err)
		}

		// NOTE(review): container types not matched below fall through
		// silently (no default case) — confirm this is intentional.
		switch container := pvalue.(type) {
		case swspec.Definitions:
			container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}

		case map[string]swspec.Schema:
			container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}

		case []swspec.Schema:
			idx, err := strconv.Atoi(entry)
			if err != nil {
				return fmt.Errorf("%s not a number: %v", pth, err)
			}
			container[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}

		case *swspec.SchemaOrArray:
			idx, err := strconv.Atoi(entry)
			if err != nil {
				return fmt.Errorf("%s not a number: %v", pth, err)
			}
			container.Schemas[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}

		}

	default:
		return fmt.Errorf("no schema with ref found at %s for %T", key, value)
	}

	return nil
}
|
||||||
|
|
||||||
|
// containsString reports whether name occurs in names.
func containsString(names []string, name string) bool {
	for _, candidate := range names {
		if candidate == name {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// opRef captures an operation together with the key, operation ID, and
// JSON ref used to locate it in the document.
type opRef struct {
	Method string
	Path   string
	Key    string // Go-style name derived from method + path
	ID     string // the operation's (possibly rewritten) operationId
	Op     *swspec.Operation
	Ref    swspec.Ref
}

// opRefs sorts a slice of opRef by Key (sort.Interface).
type opRefs []opRef

func (o opRefs) Len() int           { return len(o) }
func (o opRefs) Swap(i, j int)      { o[i], o[j] = o[j], o[i] }
func (o opRefs) Less(i, j int) bool { return o[i].Key < o[j].Key }
|
||||||
|
|
||||||
|
// gatherOperations collects the spec's operations keyed by a unique name:
// the operation ID when set, else a Go-style "MethodPath" key. When
// operationIDs is non-empty only matching operations are kept. The
// operations' IDs are rewritten to the chosen unique name.
func gatherOperations(specDoc *Spec, operationIDs []string) map[string]opRef {
	var oprefs opRefs

	for method, pathItem := range specDoc.Operations() {
		for pth, operation := range pathItem {
			// copy so the stored pointer does not alias the loop variable
			vv := *operation
			oprefs = append(oprefs, opRef{
				Key:    swag.ToGoName(strings.ToLower(method) + " " + pth),
				Method: method,
				Path:   pth,
				ID:     vv.ID,
				Op:     &vv,
				Ref:    swspec.MustCreateRef("#" + path.Join("/paths", jsonpointer.Escape(pth), method)),
			})
		}
	}

	// deterministic processing order regardless of map iteration
	sort.Sort(oprefs)

	operations := make(map[string]opRef)
	for _, opr := range oprefs {
		nm := opr.ID
		if nm == "" {
			nm = opr.Key
		}

		// NOTE(review): fallback to Key requires BOTH method and path to
		// differ from the existing entry — confirm that '&&' (not '||')
		// is the intended collision rule.
		oo, found := operations[nm]
		if found && oo.Method != opr.Method && oo.Path != opr.Path {
			nm = opr.Key
		}
		if len(operationIDs) == 0 || containsString(operationIDs, opr.ID) || containsString(operationIDs, nm) {
			opr.ID = nm
			opr.Op.ID = nm
			operations[nm] = opr
		}
	}

	return operations
}
|
|
@ -0,0 +1,186 @@
|
||||||
|
package analysis
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/go-openapi/spec"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Mixin modifies the primary swagger spec by adding the paths and
|
||||||
|
// definitions from the mixin specs. Top level parameters and
|
||||||
|
// responses from the mixins are also carried over. Operation id
|
||||||
|
// collisions are avoided by appending "Mixin<N>" but only if
|
||||||
|
// needed. No other parts of primary are modified. Consider calling
|
||||||
|
// FixEmptyResponseDescriptions() on the modified primary if you read
|
||||||
|
// them from storage and they are valid to start with.
|
||||||
|
//
|
||||||
|
// Entries in "paths", "definitions", "parameters" and "responses" are
|
||||||
|
// added to the primary in the order of the given mixins. If the entry
|
||||||
|
// already exists in primary it is skipped with a warning message.
|
||||||
|
//
|
||||||
|
// The count of skipped entries (from collisions) is returned so any
|
||||||
|
// deviation from the number expected can flag warning in your build
|
||||||
|
// scripts. Carefully review the collisions before accepting them;
|
||||||
|
// consider renaming things if possible.
|
||||||
|
//
|
||||||
|
// No normalization of any keys takes place (paths, type defs,
|
||||||
|
// etc). Ensure they are canonical if your downstream tools do
|
||||||
|
// key normalization of any form.
|
||||||
|
func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
|
||||||
|
var skipped []string
|
||||||
|
opIds := getOpIds(primary)
|
||||||
|
if primary.Paths == nil {
|
||||||
|
primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)}
|
||||||
|
}
|
||||||
|
for i, m := range mixins {
|
||||||
|
for k, v := range m.Definitions {
|
||||||
|
// assume name collisions represent IDENTICAL type. careful.
|
||||||
|
if _, exists := primary.Definitions[k]; exists {
|
||||||
|
warn := fmt.Sprintf("definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
|
||||||
|
skipped = append(skipped, warn)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
primary.Definitions[k] = v
|
||||||
|
}
|
||||||
|
if m.Paths != nil {
|
||||||
|
for k, v := range m.Paths.Paths {
|
||||||
|
if _, exists := primary.Paths.Paths[k]; exists {
|
||||||
|
warn := fmt.Sprintf("paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
|
||||||
|
skipped = append(skipped, warn)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Swagger requires that operationIds be
|
||||||
|
// unique within a spec. If we find a
|
||||||
|
// collision we append "Mixin0" to the
|
||||||
|
// operatoinId we are adding, where 0 is mixin
|
||||||
|
// index. We assume that operationIds with
|
||||||
|
// all the proivded specs are already unique.
|
||||||
|
piops := pathItemOps(v)
|
||||||
|
for _, piop := range piops {
|
||||||
|
if opIds[piop.ID] {
|
||||||
|
piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", i)
|
||||||
|
}
|
||||||
|
opIds[piop.ID] = true
|
||||||
|
}
|
||||||
|
primary.Paths.Paths[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for k, v := range m.Parameters {
|
||||||
|
// could try to rename on conflict but would
|
||||||
|
// have to fix $refs in the mixin. Complain
|
||||||
|
// for now
|
||||||
|
if _, exists := primary.Parameters[k]; exists {
|
||||||
|
warn := fmt.Sprintf("top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
|
||||||
|
skipped = append(skipped, warn)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
primary.Parameters[k] = v
|
||||||
|
}
|
||||||
|
for k, v := range m.Responses {
|
||||||
|
// could try to rename on conflict but would
|
||||||
|
// have to fix $refs in the mixin. Complain
|
||||||
|
// for now
|
||||||
|
if _, exists := primary.Responses[k]; exists {
|
||||||
|
warn := fmt.Sprintf("top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
|
||||||
|
skipped = append(skipped, warn)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
primary.Responses[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return skipped
|
||||||
|
}
|
||||||
|
|
||||||
|
// FixEmptyResponseDescriptions replaces empty ("") response
// descriptions in the input with "(empty)" to ensure that the
// resulting Swagger stays valid. The problem appears to arise
// from reading in valid specs that have an explicit response
// description of "" (valid, response.description is required), but
// due to zero values being omitted upon re-serializing (omitempty) we
// lose them unless we stick some chars in there.
func FixEmptyResponseDescriptions(s *spec.Swagger) {
	if s.Paths != nil {
		// fix responses on every operation of every path item
		for _, v := range s.Paths.Paths {
			if v.Get != nil {
				FixEmptyDescs(v.Get.Responses)
			}
			if v.Put != nil {
				FixEmptyDescs(v.Put.Responses)
			}
			if v.Post != nil {
				FixEmptyDescs(v.Post.Responses)
			}
			if v.Delete != nil {
				FixEmptyDescs(v.Delete.Responses)
			}
			if v.Options != nil {
				FixEmptyDescs(v.Options.Responses)
			}
			if v.Head != nil {
				FixEmptyDescs(v.Head.Responses)
			}
			if v.Patch != nil {
				FixEmptyDescs(v.Patch.Responses)
			}
		}
	}
	// top-level shared responses: map values are copies, so write back
	for k, v := range s.Responses {
		FixEmptyDesc(&v)
		s.Responses[k] = v
	}
}
|
||||||
|
|
||||||
|
// FixEmptyDescs adds "(empty)" as the description for any Response in
// the given Responses object that doesn't already have one.
func FixEmptyDescs(rs *spec.Responses) {
	FixEmptyDesc(rs.Default)
	// map values are copies, so fix the copy and write it back
	for k, v := range rs.StatusCodeResponses {
		FixEmptyDesc(&v)
		rs.StatusCodeResponses[k] = v
	}
}

// FixEmptyDesc adds "(empty)" as the description to the given
// Response object if it doesn't already have one and isn't a
// ref. No-op on nil input.
func FixEmptyDesc(rs *spec.Response) {
	if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil {
		return
	}
	rs.Description = "(empty)"
}
|
||||||
|
|
||||||
|
// getOpIds extracts all the paths.<path>.operationIds from the given
|
||||||
|
// spec and returns them as the keys in a map with 'true' values.
|
||||||
|
func getOpIds(s *spec.Swagger) map[string]bool {
|
||||||
|
rv := make(map[string]bool)
|
||||||
|
if s.Paths == nil {
|
||||||
|
return rv
|
||||||
|
}
|
||||||
|
for _, v := range s.Paths.Paths {
|
||||||
|
piops := pathItemOps(v)
|
||||||
|
for _, op := range piops {
|
||||||
|
rv[op.ID] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return rv
|
||||||
|
}
|
||||||
|
|
||||||
|
func pathItemOps(p spec.PathItem) []*spec.Operation {
|
||||||
|
var rv []*spec.Operation
|
||||||
|
rv = appendOp(rv, p.Get)
|
||||||
|
rv = appendOp(rv, p.Put)
|
||||||
|
rv = appendOp(rv, p.Post)
|
||||||
|
rv = appendOp(rv, p.Delete)
|
||||||
|
rv = appendOp(rv, p.Head)
|
||||||
|
rv = appendOp(rv, p.Patch)
|
||||||
|
return rv
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation {
|
||||||
|
if op == nil {
|
||||||
|
return ops
|
||||||
|
}
|
||||||
|
return append(ops, op)
|
||||||
|
}
|
|
@ -0,0 +1,233 @@
|
||||||
|
package analysis
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/go-openapi/spec"
|
||||||
|
"github.com/go-openapi/strfmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SchemaOpts configures the schema analyzer
type SchemaOpts struct {
	// Schema is the schema to analyze.
	Schema *spec.Schema
	// Root is the document the schema belongs to, used when resolving $refs.
	Root interface{}
	// BasePath is the base path for resolving relative refs.
	BasePath string
	// blank trailing field; presumably to discourage unkeyed struct
	// literals — confirm intent
	_ struct{}
}
|
||||||
|
|
||||||
|
// Schema analysis, will classify the schema according to known
// patterns.
func Schema(opts SchemaOpts) (*AnalyzedSchema, error) {
	a := &AnalyzedSchema{
		schema:   opts.Schema,
		root:     opts.Root,
		basePath: opts.BasePath,
	}

	// capture raw structural flags first; the inferences below read them
	a.initializeFlags()
	a.inferKnownType()
	a.inferEnum()
	a.inferBaseType()

	if err := a.inferMap(); err != nil {
		return nil, err
	}
	if err := a.inferArray(); err != nil {
		return nil, err
	}

	if err := a.inferTuple(); err != nil {
		return nil, err
	}

	if err := a.inferFromRef(); err != nil {
		return nil, err
	}

	// depends on the results of the inferences above
	a.inferSimpleSchema()
	return a, nil
}
|
||||||
|
|
||||||
|
// AnalyzedSchema indicates what the schema represents
type AnalyzedSchema struct {
	schema   *spec.Schema
	root     interface{} // enclosing document, for $ref resolution
	basePath string      // base path for relative refs

	// raw structural flags captured from the schema by initializeFlags
	hasProps           bool
	hasAllOf           bool
	hasItems           bool
	hasAdditionalProps bool
	hasAdditionalItems bool
	hasRef             bool

	// classification results computed by the infer* methods
	IsKnownType      bool
	IsSimpleSchema   bool
	IsArray          bool
	IsSimpleArray    bool
	IsMap            bool
	IsSimpleMap      bool
	IsExtendedObject bool
	IsTuple          bool
	IsTupleWithExtra bool
	IsBaseType       bool
	IsEnum           bool
}
|
||||||
|
|
||||||
|
// inherits copies every analysis flag from other onto this schema; used to
// adopt the classification of a resolved $ref target. No-op on nil input.
func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) {
	if other == nil {
		return
	}
	a.hasProps = other.hasProps
	a.hasAllOf = other.hasAllOf
	a.hasItems = other.hasItems
	a.hasAdditionalItems = other.hasAdditionalItems
	a.hasAdditionalProps = other.hasAdditionalProps
	a.hasRef = other.hasRef

	a.IsKnownType = other.IsKnownType
	a.IsSimpleSchema = other.IsSimpleSchema
	a.IsArray = other.IsArray
	a.IsSimpleArray = other.IsSimpleArray
	a.IsMap = other.IsMap
	a.IsSimpleMap = other.IsSimpleMap
	a.IsExtendedObject = other.IsExtendedObject
	a.IsTuple = other.IsTuple
	a.IsTupleWithExtra = other.IsTupleWithExtra
	a.IsBaseType = other.IsBaseType
	a.IsEnum = other.IsEnum
}
|
||||||
|
|
||||||
|
// inferFromRef, for schemas that are a $ref, resolves the target against
// the root document, analyzes it recursively, and adopts its
// classification via inherits.
func (a *AnalyzedSchema) inferFromRef() error {
	if a.hasRef {
		opts := &spec.ExpandOptions{RelativeBase: a.basePath}
		sch, err := spec.ResolveRefWithBase(a.root, &a.schema.Ref, opts)
		if err != nil {
			return err
		}
		if sch != nil {
			rsch, err := Schema(SchemaOpts{
				Schema:   sch,
				Root:     a.root,
				BasePath: a.basePath,
			})
			if err != nil {
				return err
			}
			a.inherits(rsch)
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// inferSimpleSchema flags schemas that are a known scalar type, a simple
// array, or a simple map. Must run after the other inferences.
func (a *AnalyzedSchema) inferSimpleSchema() {
	a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap
}

// inferKnownType flags schemas whose type is a JSON primitive, a string
// format registered with strfmt, or a bare object carrying no
// properties/allOf/additional members.
func (a *AnalyzedSchema) inferKnownType() {
	tpe := a.schema.Type
	format := a.schema.Format
	a.IsKnownType = tpe.Contains("boolean") ||
		tpe.Contains("integer") ||
		tpe.Contains("number") ||
		tpe.Contains("string") ||
		(format != "" && strfmt.Default.ContainsName(format)) ||
		(a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems)
}
|
||||||
|
|
||||||
|
// inferMap classifies object schemas using additionalProperties: a pure
// map when there are no properties/allOf, an "extended object" when both
// are present. A map is simple when its value schema analyzes as simple
// (or when additionalProperties merely allows any value).
func (a *AnalyzedSchema) inferMap() error {
	if a.isObjectType() {
		hasExtra := a.hasProps || a.hasAllOf
		a.IsMap = a.hasAdditionalProps && !hasExtra
		a.IsExtendedObject = a.hasAdditionalProps && hasExtra
		if a.IsMap {
			if a.schema.AdditionalProperties.Schema != nil {
				// analyze the value schema to decide simplicity
				msch, err := Schema(SchemaOpts{
					Schema:   a.schema.AdditionalProperties.Schema,
					Root:     a.root,
					BasePath: a.basePath,
				})
				if err != nil {
					return err
				}
				a.IsSimpleMap = msch.IsSimpleSchema
			} else if a.schema.AdditionalProperties.Allows {
				a.IsSimpleMap = true
			}
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
func (a *AnalyzedSchema) inferArray() error {
|
||||||
|
fromValid := a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Len() < 2)
|
||||||
|
a.IsArray = fromValid || (a.hasItems && a.schema.Items.Len() < 2)
|
||||||
|
if a.IsArray && a.hasItems {
|
||||||
|
if a.schema.Items.Schema != nil {
|
||||||
|
itsch, err := Schema(SchemaOpts{
|
||||||
|
Schema: a.schema.Items.Schema,
|
||||||
|
Root: a.root,
|
||||||
|
BasePath: a.basePath,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
a.IsSimpleArray = itsch.IsSimpleSchema
|
||||||
|
}
|
||||||
|
if len(a.schema.Items.Schemas) > 0 {
|
||||||
|
itsch, err := Schema(SchemaOpts{
|
||||||
|
Schema: &a.schema.Items.Schemas[0],
|
||||||
|
Root: a.root,
|
||||||
|
BasePath: a.basePath,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
a.IsSimpleArray = itsch.IsSimpleSchema
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if a.IsArray && !a.hasItems {
|
||||||
|
a.IsSimpleArray = true
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *AnalyzedSchema) inferTuple() error {
|
||||||
|
tuple := a.hasItems && a.schema.Items.Len() > 1
|
||||||
|
a.IsTuple = tuple && !a.hasAdditionalItems
|
||||||
|
a.IsTupleWithExtra = tuple && a.hasAdditionalItems
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *AnalyzedSchema) inferBaseType() {
|
||||||
|
if a.isObjectType() {
|
||||||
|
a.IsBaseType = a.schema.Discriminator != ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *AnalyzedSchema) inferEnum() {
|
||||||
|
a.IsEnum = len(a.schema.Enum) > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *AnalyzedSchema) initializeFlags() {
|
||||||
|
a.hasProps = len(a.schema.Properties) > 0
|
||||||
|
a.hasAllOf = len(a.schema.AllOf) > 0
|
||||||
|
a.hasRef = a.schema.Ref.String() != ""
|
||||||
|
|
||||||
|
a.hasItems = a.schema.Items != nil &&
|
||||||
|
(a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0)
|
||||||
|
|
||||||
|
a.hasAdditionalProps = a.schema.AdditionalProperties != nil &&
|
||||||
|
(a.schema.AdditionalProperties != nil || a.schema.AdditionalProperties.Allows)
|
||||||
|
|
||||||
|
a.hasAdditionalItems = a.schema.AdditionalItems != nil &&
|
||||||
|
(a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *AnalyzedSchema) isObjectType() bool {
|
||||||
|
return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *AnalyzedSchema) isArrayType() bool {
|
||||||
|
return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array"))
|
||||||
|
}
|
|
@ -0,0 +1,74 @@
|
||||||
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
In the interest of fostering an open and welcoming environment, we as
|
||||||
|
contributors and maintainers pledge to making participation in our project and
|
||||||
|
our community a harassment-free experience for everyone, regardless of age, body
|
||||||
|
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||||
|
nationality, personal appearance, race, religion, or sexual identity and
|
||||||
|
orientation.
|
||||||
|
|
||||||
|
## Our Standards
|
||||||
|
|
||||||
|
Examples of behavior that contributes to creating a positive environment
|
||||||
|
include:
|
||||||
|
|
||||||
|
* Using welcoming and inclusive language
|
||||||
|
* Being respectful of differing viewpoints and experiences
|
||||||
|
* Gracefully accepting constructive criticism
|
||||||
|
* Focusing on what is best for the community
|
||||||
|
* Showing empathy towards other community members
|
||||||
|
|
||||||
|
Examples of unacceptable behavior by participants include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||||
|
advances
|
||||||
|
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as a physical or electronic
|
||||||
|
address, without explicit permission
|
||||||
|
* Other conduct which could reasonably be considered inappropriate in a
|
||||||
|
professional setting
|
||||||
|
|
||||||
|
## Our Responsibilities
|
||||||
|
|
||||||
|
Project maintainers are responsible for clarifying the standards of acceptable
|
||||||
|
behavior and are expected to take appropriate and fair corrective action in
|
||||||
|
response to any instances of unacceptable behavior.
|
||||||
|
|
||||||
|
Project maintainers have the right and responsibility to remove, edit, or
|
||||||
|
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||||
|
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||||
|
permanently any contributor for other behaviors that they deem inappropriate,
|
||||||
|
threatening, offensive, or harmful.
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
This Code of Conduct applies both within project spaces and in public spaces
|
||||||
|
when an individual is representing the project or its community. Examples of
|
||||||
|
representing a project or community include using an official project e-mail
|
||||||
|
address, posting via an official social media account, or acting as an appointed
|
||||||
|
representative at an online or offline event. Representation of a project may be
|
||||||
|
further defined and clarified by project maintainers.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
|
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
||||||
|
complaints will be reviewed and investigated and will result in a response that
|
||||||
|
is deemed necessary and appropriate to the circumstances. The project team is
|
||||||
|
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||||
|
Further details of specific enforcement policies may be posted separately.
|
||||||
|
|
||||||
|
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||||
|
faith may face temporary or permanent repercussions as determined by other
|
||||||
|
members of the project's leadership.
|
||||||
|
|
||||||
|
## Attribution
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||||
|
available at [http://contributor-covenant.org/version/1/4][version]
|
||||||
|
|
||||||
|
[homepage]: http://contributor-covenant.org
|
||||||
|
[version]: http://contributor-covenant.org/version/1/4/
|
|
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@ -0,0 +1,5 @@
|
||||||
|
# OpenAPI errors [![Build Status](https://travis-ci.org/go-openapi/errors.svg?branch=master)](https://travis-ci.org/go-openapi/errors) [![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
||||||
|
|
||||||
|
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/errors?status.svg)](http://godoc.org/github.com/go-openapi/errors)
|
||||||
|
|
||||||
|
Shared errors used throughout the various libraries for the go-openapi toolkit
|
|
@ -0,0 +1,150 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package errors
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Error represents a error interface all swagger framework errors implement
|
||||||
|
type Error interface {
|
||||||
|
error
|
||||||
|
Code() int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type apiError struct {
|
||||||
|
code int32
|
||||||
|
message string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *apiError) Error() string {
|
||||||
|
return a.message
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *apiError) Code() int32 {
|
||||||
|
return a.code
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new API error with a code and a message
|
||||||
|
func New(code int32, message string, args ...interface{}) Error {
|
||||||
|
if len(args) > 0 {
|
||||||
|
return &apiError{code, fmt.Sprintf(message, args...)}
|
||||||
|
}
|
||||||
|
return &apiError{code, message}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotFound creates a new not found error
|
||||||
|
func NotFound(message string, args ...interface{}) Error {
|
||||||
|
if message == "" {
|
||||||
|
message = "Not found"
|
||||||
|
}
|
||||||
|
return New(http.StatusNotFound, fmt.Sprintf(message, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotImplemented creates a new not implemented error
func NotImplemented(message string) Error {
	// 501 with the caller-supplied message passed through verbatim.
	return New(http.StatusNotImplemented, message)
}
|
||||||
|
|
||||||
|
// MethodNotAllowedError represents an error for when the path matches but the method doesn't
type MethodNotAllowedError struct {
	code int32
	// Allowed lists the HTTP methods the matched path does accept; ServeError
	// emits it in the "Allow" response header.
	Allowed []string
	message string
}

// Error returns the error message.
func (m *MethodNotAllowedError) Error() string {
	return m.message
}

// Code the error code
func (m *MethodNotAllowedError) Code() int32 {
	return m.code
}
|
||||||
|
|
||||||
|
func errorAsJSON(err Error) []byte {
|
||||||
|
b, _ := json.Marshal(struct {
|
||||||
|
Code int32 `json:"code"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
}{err.Code(), err.Error()})
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func flattenComposite(errs *CompositeError) *CompositeError {
|
||||||
|
var res []error
|
||||||
|
for _, er := range errs.Errors {
|
||||||
|
switch e := er.(type) {
|
||||||
|
case *CompositeError:
|
||||||
|
if len(e.Errors) > 0 {
|
||||||
|
flat := flattenComposite(e)
|
||||||
|
if len(flat.Errors) > 0 {
|
||||||
|
res = append(res, flat.Errors...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
if e != nil {
|
||||||
|
res = append(res, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return CompositeValidationError(res...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MethodNotAllowed creates a new method not allowed error
|
||||||
|
func MethodNotAllowed(requested string, allow []string) Error {
|
||||||
|
msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ","))
|
||||||
|
return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServeError the error handler interface implemenation
|
||||||
|
func ServeError(rw http.ResponseWriter, r *http.Request, err error) {
|
||||||
|
rw.Header().Set("Content-Type", "application/json")
|
||||||
|
switch e := err.(type) {
|
||||||
|
case *CompositeError:
|
||||||
|
er := flattenComposite(e)
|
||||||
|
ServeError(rw, r, er.Errors[0])
|
||||||
|
case *MethodNotAllowedError:
|
||||||
|
rw.Header().Add("Allow", strings.Join(err.(*MethodNotAllowedError).Allowed, ","))
|
||||||
|
rw.WriteHeader(asHTTPCode(int(e.Code())))
|
||||||
|
if r == nil || r.Method != "HEAD" {
|
||||||
|
rw.Write(errorAsJSON(e))
|
||||||
|
}
|
||||||
|
case Error:
|
||||||
|
if e == nil {
|
||||||
|
rw.WriteHeader(http.StatusInternalServerError)
|
||||||
|
rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error")))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rw.WriteHeader(asHTTPCode(int(e.Code())))
|
||||||
|
if r == nil || r.Method != "HEAD" {
|
||||||
|
rw.Write(errorAsJSON(e))
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
rw.WriteHeader(http.StatusInternalServerError)
|
||||||
|
if r == nil || r.Method != "HEAD" {
|
||||||
|
rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error())))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// asHTTPCode maps an error code onto a usable HTTP status: codes at or above
// 600 fall outside the HTTP range and are reported as 422.
func asHTTPCode(input int) int {
	if input < 600 {
		return input
	}
	return 422
}
|
|
@ -0,0 +1,20 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package errors
|
||||||
|
|
||||||
|
// Unauthenticated returns an unauthenticated error
func Unauthenticated(scheme string) Error {
	// 401 with the security scheme name interpolated into the message.
	return New(401, "unauthenticated for %s", scheme)
}
|
|
@ -0,0 +1,85 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package errors
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Validation represents a failure of a precondition
type Validation struct {
	code int32
	// Name of the parameter or property that failed validation.
	Name string
	// In is the location of the value (e.g. "header", as used by
	// InvalidContentType / InvalidResponseFormat in this file).
	In string
	// Value is the offending value as received.
	Value interface{}
	message string
	// Values lists the accepted alternatives, when applicable.
	Values []interface{}
}

// Error returns the validation failure message.
func (e *Validation) Error() string {
	return e.message
}

// Code the error code
func (e *Validation) Code() int32 {
	return e.code
}
|
||||||
|
|
||||||
|
const (
	// contentTypeFail is the message for a request content type that is not
	// among the allowed media types.
	contentTypeFail = `unsupported media type %q, only %v are allowed`
	// responseFormatFail is the message for a requested response format that
	// no available media type can satisfy.
	responseFormatFail = `unsupported media type requested, only %v are available`
)
|
||||||
|
|
||||||
|
// InvalidContentType error for an invalid content type
|
||||||
|
func InvalidContentType(value string, allowed []string) *Validation {
|
||||||
|
var values []interface{}
|
||||||
|
for _, v := range allowed {
|
||||||
|
values = append(values, v)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: http.StatusUnsupportedMediaType,
|
||||||
|
Name: "Content-Type",
|
||||||
|
In: "header",
|
||||||
|
Value: value,
|
||||||
|
Values: values,
|
||||||
|
message: fmt.Sprintf(contentTypeFail, value, allowed),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InvalidResponseFormat error for an unacceptable response format request
|
||||||
|
func InvalidResponseFormat(value string, allowed []string) *Validation {
|
||||||
|
var values []interface{}
|
||||||
|
for _, v := range allowed {
|
||||||
|
values = append(values, v)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: http.StatusNotAcceptable,
|
||||||
|
Name: "Accept",
|
||||||
|
In: "header",
|
||||||
|
Value: value,
|
||||||
|
Values: values,
|
||||||
|
message: fmt.Sprintf(responseFormatFail, allowed),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate error message name for aliased property
|
||||||
|
func (e *Validation) ValidateName(name string) *Validation {
|
||||||
|
if e.Name == "" && name != "" {
|
||||||
|
e.Name = name
|
||||||
|
e.message = name+e.message
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
|
@ -0,0 +1,51 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package errors
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIVerificationFailed is an error that contains all the missing info for a mismatched section
// between the api registrations and the api spec
type APIVerificationFailed struct {
	Section              string
	MissingSpecification []string
	MissingRegistration  []string
}

// Error renders the mismatch report: one line for entries missing a
// registration and one line for entries missing from the spec file.
func (v *APIVerificationFailed) Error() string {
	var buf bytes.Buffer

	hasRegMissing := len(v.MissingRegistration) > 0
	hasSpecMissing := len(v.MissingSpecification) > 0

	if hasRegMissing {
		fmt.Fprintf(&buf, "missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section)
	}
	if hasRegMissing && hasSpecMissing {
		buf.WriteString("\n")
	}
	if hasSpecMissing {
		fmt.Fprintf(&buf, "missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section)
	}

	return buf.String()
}
|
|
@ -0,0 +1,59 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package errors
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// ParseError represents a parsing error for a named parameter value.
type ParseError struct {
	code    int32 // http status code reported by Code(); always 400
	Name    string
	In      string
	Value   string
	Reason  error
	message string // pre-rendered human-readable description
}

// Error returns the pre-rendered parse failure message.
func (e *ParseError) Error() string {
	return e.message
}

// Code returns the http status code for this error
func (e *ParseError) Code() int32 {
	return e.code
}

const (
	parseErrorTemplContent     = `parsing %s %s from %q failed, because %s`
	parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s`
)

// NewParseError creates a new parse error
func NewParseError(name, in, value string, reason error) *ParseError {
	// default to the message that mentions the parameter location,
	// falling back to the location-free variant when "in" is empty
	message := fmt.Sprintf(parseErrorTemplContent, name, in, value, reason)
	if in == "" {
		message = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason)
	}
	return &ParseError{
		code:    400,
		Name:    name,
		In:      in,
		Value:   value,
		Reason:  reason,
		message: message,
	}
}
|
|
@ -0,0 +1,548 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package errors
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Message templates for the validation error constructors below.
// The first group mentions the parameter location ("%s in %s ...");
// each "NoIn" variant is the same message without the location, used
// when the constructor is called with an empty "in" argument.
const (
	invalidType               = "%s is an invalid type name"
	typeFail                  = "%s in %s must be of type %s"
	typeFailWithData          = "%s in %s must be of type %s: %q"
	typeFailWithError         = "%s in %s must be of type %s, because: %s"
	requiredFail              = "%s in %s is required"
	tooLongMessage            = "%s in %s should be at most %d chars long"
	tooShortMessage           = "%s in %s should be at least %d chars long"
	patternFail               = "%s in %s should match '%s'"
	enumFail                  = "%s in %s should be one of %v"
	multipleOfFail            = "%s in %s should be a multiple of %v"
	maxIncFail                = "%s in %s should be less than or equal to %v"
	maxExcFail                = "%s in %s should be less than %v"
	minIncFail                = "%s in %s should be greater than or equal to %v"
	minExcFail                = "%s in %s should be greater than %v"
	uniqueFail                = "%s in %s shouldn't contain duplicates"
	maxItemsFail              = "%s in %s should have at most %d items"
	minItemsFail              = "%s in %s should have at least %d items"
	typeFailNoIn              = "%s must be of type %s"
	typeFailWithDataNoIn      = "%s must be of type %s: %q"
	typeFailWithErrorNoIn     = "%s must be of type %s, because: %s"
	requiredFailNoIn          = "%s is required"
	tooLongMessageNoIn        = "%s should be at most %d chars long"
	tooShortMessageNoIn       = "%s should be at least %d chars long"
	patternFailNoIn           = "%s should match '%s'"
	enumFailNoIn              = "%s should be one of %v"
	multipleOfFailNoIn        = "%s should be a multiple of %v"
	maxIncFailNoIn            = "%s should be less than or equal to %v"
	maxExcFailNoIn            = "%s should be less than %v"
	minIncFailNoIn            = "%s should be greater than or equal to %v"
	minExcFailNoIn            = "%s should be greater than %v"
	uniqueFailNoIn            = "%s shouldn't contain duplicates"
	maxItemsFailNoIn          = "%s should have at most %d items"
	minItemsFailNoIn          = "%s should have at least %d items"
	noAdditionalItems         = "%s in %s can't have additional items"
	noAdditionalItemsNoIn     = "%s can't have additional items"
	tooFewProperties          = "%s in %s should have at least %d properties"
	tooFewPropertiesNoIn      = "%s should have at least %d properties"
	tooManyProperties         = "%s in %s should have at most %d properties"
	tooManyPropertiesNoIn     = "%s should have at most %d properties"
	unallowedProperty         = "%s.%s in %s is a forbidden property"
	unallowedPropertyNoIn     = "%s.%s is a forbidden property"
	failedAllPatternProps     = "%s.%s in %s failed all pattern properties"
	failedAllPatternPropsNoIn = "%s.%s failed all pattern properties"
)
|
||||||
|
|
||||||
|
// All code responses can be used to differentiate errors for different handling
// by the consuming program
const (
	// CompositeErrorCode remains 422 for backwards-compatibility
	// and to separate it from validation errors with cause
	CompositeErrorCode = 422
	// InvalidTypeCode is used for any subclass of invalid types
	// (iota is 1 at this point, so the codes below run from 601 upward).
	InvalidTypeCode = 600 + iota
	RequiredFailCode
	TooLongFailCode
	TooShortFailCode
	PatternFailCode
	EnumFailCode
	MultipleOfFailCode
	MaxFailCode
	MinFailCode
	UniqueFailCode
	MaxItemsFailCode
	MinItemsFailCode
	NoAdditionalItemsCode
	TooFewPropertiesCode
	TooManyPropertiesCode
	UnallowedPropertyCode
	FailedAllPatternPropsCode
)
|
||||||
|
|
||||||
|
// CompositeError is an error that groups several errors together
|
||||||
|
type CompositeError struct {
|
||||||
|
Errors []error
|
||||||
|
code int32
|
||||||
|
message string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code for this error
|
||||||
|
func (c *CompositeError) Code() int32 {
|
||||||
|
return c.code
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *CompositeError) Error() string {
|
||||||
|
if len(c.Errors) > 0 {
|
||||||
|
msgs := []string{c.message + ":"}
|
||||||
|
for _, e := range c.Errors {
|
||||||
|
msgs = append(msgs, e.Error())
|
||||||
|
}
|
||||||
|
return strings.Join(msgs, "\n")
|
||||||
|
}
|
||||||
|
return c.message
|
||||||
|
}
|
||||||
|
|
||||||
|
// CompositeValidationError an error to wrap a bunch of other errors
|
||||||
|
func CompositeValidationError(errors ...error) *CompositeError {
|
||||||
|
return &CompositeError{
|
||||||
|
code: CompositeErrorCode,
|
||||||
|
Errors: append([]error{}, errors...),
|
||||||
|
message: "validation failure list",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailedAllPatternProperties an error for when the property doesn't match a pattern
|
||||||
|
func FailedAllPatternProperties(name, in, key string) *Validation {
|
||||||
|
msg := fmt.Sprintf(failedAllPatternProps, name, key, in)
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: FailedAllPatternPropsCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: key,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PropertyNotAllowed an error for when the property doesn't match a pattern
|
||||||
|
func PropertyNotAllowed(name, in, key string) *Validation {
|
||||||
|
msg := fmt.Sprintf(unallowedProperty, name, key, in)
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(unallowedPropertyNoIn, name, key)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: UnallowedPropertyCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: key,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TooFewProperties an error for an object with too few properties
|
||||||
|
func TooFewProperties(name, in string, n int64) *Validation {
|
||||||
|
msg := fmt.Sprintf(tooFewProperties, name, in, n)
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: TooFewPropertiesCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: n,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TooManyProperties an error for an object with too many properties
|
||||||
|
func TooManyProperties(name, in string, n int64) *Validation {
|
||||||
|
msg := fmt.Sprintf(tooManyProperties, name, in, n)
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: TooManyPropertiesCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: n,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AdditionalItemsNotAllowed an error for invalid additional items
|
||||||
|
func AdditionalItemsNotAllowed(name, in string) *Validation {
|
||||||
|
msg := fmt.Sprintf(noAdditionalItems, name, in)
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(noAdditionalItemsNoIn, name)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: NoAdditionalItemsCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InvalidCollectionFormat another flavor of invalid type error
|
||||||
|
func InvalidCollectionFormat(name, in, format string) *Validation {
|
||||||
|
return &Validation{
|
||||||
|
code: InvalidTypeCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: format,
|
||||||
|
message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InvalidTypeName an error for when the type is invalid
|
||||||
|
func InvalidTypeName(typeName string) *Validation {
|
||||||
|
return &Validation{
|
||||||
|
code: InvalidTypeCode,
|
||||||
|
Value: typeName,
|
||||||
|
message: fmt.Sprintf(invalidType, typeName),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InvalidType creates an error for when the type is invalid
|
||||||
|
func InvalidType(name, in, typeName string, value interface{}) *Validation {
|
||||||
|
var message string
|
||||||
|
|
||||||
|
if in != "" {
|
||||||
|
switch value.(type) {
|
||||||
|
case string:
|
||||||
|
message = fmt.Sprintf(typeFailWithData, name, in, typeName, value)
|
||||||
|
case error:
|
||||||
|
message = fmt.Sprintf(typeFailWithError, name, in, typeName, value)
|
||||||
|
default:
|
||||||
|
message = fmt.Sprintf(typeFail, name, in, typeName)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
switch value.(type) {
|
||||||
|
case string:
|
||||||
|
message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value)
|
||||||
|
case error:
|
||||||
|
message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value)
|
||||||
|
default:
|
||||||
|
message = fmt.Sprintf(typeFailNoIn, name, typeName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Validation{
|
||||||
|
code: InvalidTypeCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: value,
|
||||||
|
message: message,
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// DuplicateItems error for when an array contains duplicates
|
||||||
|
func DuplicateItems(name, in string) *Validation {
|
||||||
|
msg := fmt.Sprintf(uniqueFail, name, in)
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(uniqueFailNoIn, name)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: UniqueFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TooManyItems error for when an array contains too many items
|
||||||
|
func TooManyItems(name, in string, max int64) *Validation {
|
||||||
|
msg := fmt.Sprintf(maxItemsFail, name, in, max)
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(maxItemsFailNoIn, name, max)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Validation{
|
||||||
|
code: MaxItemsFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TooFewItems error for when an array contains too few items
|
||||||
|
func TooFewItems(name, in string, min int64) *Validation {
|
||||||
|
msg := fmt.Sprintf(minItemsFail, name, in, min)
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(minItemsFailNoIn, name, min)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: MinItemsFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExceedsMaximumInt error for when maxinum validation fails
|
||||||
|
func ExceedsMaximumInt(name, in string, max int64, exclusive bool) *Validation {
|
||||||
|
var message string
|
||||||
|
if in == "" {
|
||||||
|
m := maxIncFailNoIn
|
||||||
|
if exclusive {
|
||||||
|
m = maxExcFailNoIn
|
||||||
|
}
|
||||||
|
message = fmt.Sprintf(m, name, max)
|
||||||
|
} else {
|
||||||
|
m := maxIncFail
|
||||||
|
if exclusive {
|
||||||
|
m = maxExcFail
|
||||||
|
}
|
||||||
|
message = fmt.Sprintf(m, name, in, max)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: MaxFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: max,
|
||||||
|
message: message,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExceedsMaximumUint error for when maxinum validation fails
|
||||||
|
func ExceedsMaximumUint(name, in string, max uint64, exclusive bool) *Validation {
|
||||||
|
var message string
|
||||||
|
if in == "" {
|
||||||
|
m := maxIncFailNoIn
|
||||||
|
if exclusive {
|
||||||
|
m = maxExcFailNoIn
|
||||||
|
}
|
||||||
|
message = fmt.Sprintf(m, name, max)
|
||||||
|
} else {
|
||||||
|
m := maxIncFail
|
||||||
|
if exclusive {
|
||||||
|
m = maxExcFail
|
||||||
|
}
|
||||||
|
message = fmt.Sprintf(m, name, in, max)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: MaxFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: max,
|
||||||
|
message: message,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExceedsMaximum error for when maxinum validation fails
|
||||||
|
func ExceedsMaximum(name, in string, max float64, exclusive bool) *Validation {
|
||||||
|
var message string
|
||||||
|
if in == "" {
|
||||||
|
m := maxIncFailNoIn
|
||||||
|
if exclusive {
|
||||||
|
m = maxExcFailNoIn
|
||||||
|
}
|
||||||
|
message = fmt.Sprintf(m, name, max)
|
||||||
|
} else {
|
||||||
|
m := maxIncFail
|
||||||
|
if exclusive {
|
||||||
|
m = maxExcFail
|
||||||
|
}
|
||||||
|
message = fmt.Sprintf(m, name, in, max)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: MaxFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: max,
|
||||||
|
message: message,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExceedsMinimumInt error for when maxinum validation fails
|
||||||
|
func ExceedsMinimumInt(name, in string, min int64, exclusive bool) *Validation {
|
||||||
|
var message string
|
||||||
|
if in == "" {
|
||||||
|
m := minIncFailNoIn
|
||||||
|
if exclusive {
|
||||||
|
m = minExcFailNoIn
|
||||||
|
}
|
||||||
|
message = fmt.Sprintf(m, name, min)
|
||||||
|
} else {
|
||||||
|
m := minIncFail
|
||||||
|
if exclusive {
|
||||||
|
m = minExcFail
|
||||||
|
}
|
||||||
|
message = fmt.Sprintf(m, name, in, min)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: MinFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: min,
|
||||||
|
message: message,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExceedsMinimumUint error for when maxinum validation fails
|
||||||
|
func ExceedsMinimumUint(name, in string, min uint64, exclusive bool) *Validation {
|
||||||
|
var message string
|
||||||
|
if in == "" {
|
||||||
|
m := minIncFailNoIn
|
||||||
|
if exclusive {
|
||||||
|
m = minExcFailNoIn
|
||||||
|
}
|
||||||
|
message = fmt.Sprintf(m, name, min)
|
||||||
|
} else {
|
||||||
|
m := minIncFail
|
||||||
|
if exclusive {
|
||||||
|
m = minExcFail
|
||||||
|
}
|
||||||
|
message = fmt.Sprintf(m, name, in, min)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: MinFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: min,
|
||||||
|
message: message,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExceedsMinimum error for when maxinum validation fails
|
||||||
|
func ExceedsMinimum(name, in string, min float64, exclusive bool) *Validation {
|
||||||
|
var message string
|
||||||
|
if in == "" {
|
||||||
|
m := minIncFailNoIn
|
||||||
|
if exclusive {
|
||||||
|
m = minExcFailNoIn
|
||||||
|
}
|
||||||
|
message = fmt.Sprintf(m, name, min)
|
||||||
|
} else {
|
||||||
|
m := minIncFail
|
||||||
|
if exclusive {
|
||||||
|
m = minExcFail
|
||||||
|
}
|
||||||
|
message = fmt.Sprintf(m, name, in, min)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: MinFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: min,
|
||||||
|
message: message,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotMultipleOf error for when multiple of validation fails
|
||||||
|
func NotMultipleOf(name, in string, multiple float64) *Validation {
|
||||||
|
var msg string
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple)
|
||||||
|
} else {
|
||||||
|
msg = fmt.Sprintf(multipleOfFail, name, in, multiple)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: MultipleOfFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: multiple,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnumFail error for when an enum validation fails
|
||||||
|
func EnumFail(name, in string, value interface{}, values []interface{}) *Validation {
|
||||||
|
var msg string
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(enumFailNoIn, name, values)
|
||||||
|
} else {
|
||||||
|
msg = fmt.Sprintf(enumFail, name, in, values)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Validation{
|
||||||
|
code: EnumFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
Value: value,
|
||||||
|
Values: values,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Required error for when a value is missing
|
||||||
|
func Required(name, in string) *Validation {
|
||||||
|
var msg string
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(requiredFailNoIn, name)
|
||||||
|
} else {
|
||||||
|
msg = fmt.Sprintf(requiredFail, name, in)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: RequiredFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TooLong error for when a string is too long
|
||||||
|
func TooLong(name, in string, max int64) *Validation {
|
||||||
|
var msg string
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(tooLongMessageNoIn, name, max)
|
||||||
|
} else {
|
||||||
|
msg = fmt.Sprintf(tooLongMessage, name, in, max)
|
||||||
|
}
|
||||||
|
return &Validation{
|
||||||
|
code: TooLongFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TooShort error for when a string is too short
|
||||||
|
func TooShort(name, in string, min int64) *Validation {
|
||||||
|
var msg string
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(tooShortMessageNoIn, name, min)
|
||||||
|
} else {
|
||||||
|
msg = fmt.Sprintf(tooShortMessage, name, in, min)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Validation{
|
||||||
|
code: TooShortFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailedPattern error for when a string fails a regex pattern match
|
||||||
|
// the pattern that is returned is the ECMA syntax version of the pattern not the golang version.
|
||||||
|
func FailedPattern(name, in, pattern string) *Validation {
|
||||||
|
var msg string
|
||||||
|
if in == "" {
|
||||||
|
msg = fmt.Sprintf(patternFailNoIn, name, pattern)
|
||||||
|
} else {
|
||||||
|
msg = fmt.Sprintf(patternFail, name, in, pattern)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Validation{
|
||||||
|
code: PatternFailCode,
|
||||||
|
Name: name,
|
||||||
|
In: in,
|
||||||
|
message: msg,
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,74 @@
|
||||||
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
In the interest of fostering an open and welcoming environment, we as
|
||||||
|
contributors and maintainers pledge to making participation in our project and
|
||||||
|
our community a harassment-free experience for everyone, regardless of age, body
|
||||||
|
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||||
|
nationality, personal appearance, race, religion, or sexual identity and
|
||||||
|
orientation.
|
||||||
|
|
||||||
|
## Our Standards
|
||||||
|
|
||||||
|
Examples of behavior that contributes to creating a positive environment
|
||||||
|
include:
|
||||||
|
|
||||||
|
* Using welcoming and inclusive language
|
||||||
|
* Being respectful of differing viewpoints and experiences
|
||||||
|
* Gracefully accepting constructive criticism
|
||||||
|
* Focusing on what is best for the community
|
||||||
|
* Showing empathy towards other community members
|
||||||
|
|
||||||
|
Examples of unacceptable behavior by participants include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||||
|
advances
|
||||||
|
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as a physical or electronic
|
||||||
|
address, without explicit permission
|
||||||
|
* Other conduct which could reasonably be considered inappropriate in a
|
||||||
|
professional setting
|
||||||
|
|
||||||
|
## Our Responsibilities
|
||||||
|
|
||||||
|
Project maintainers are responsible for clarifying the standards of acceptable
|
||||||
|
behavior and are expected to take appropriate and fair corrective action in
|
||||||
|
response to any instances of unacceptable behavior.
|
||||||
|
|
||||||
|
Project maintainers have the right and responsibility to remove, edit, or
|
||||||
|
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||||
|
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||||
|
permanently any contributor for other behaviors that they deem inappropriate,
|
||||||
|
threatening, offensive, or harmful.
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
This Code of Conduct applies both within project spaces and in public spaces
|
||||||
|
when an individual is representing the project or its community. Examples of
|
||||||
|
representing a project or community include using an official project e-mail
|
||||||
|
address, posting via an official social media account, or acting as an appointed
|
||||||
|
representative at an online or offline event. Representation of a project may be
|
||||||
|
further defined and clarified by project maintainers.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
|
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
||||||
|
complaints will be reviewed and investigated and will result in a response that
|
||||||
|
is deemed necessary and appropriate to the circumstances. The project team is
|
||||||
|
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||||
|
Further details of specific enforcement policies may be posted separately.
|
||||||
|
|
||||||
|
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||||
|
faith may face temporary or permanent repercussions as determined by other
|
||||||
|
members of the project's leadership.
|
||||||
|
|
||||||
|
## Attribution
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||||
|
available at [http://contributor-covenant.org/version/1/4][version]
|
||||||
|
|
||||||
|
[homepage]: http://contributor-covenant.org
|
||||||
|
[version]: http://contributor-covenant.org/version/1/4/
|
|
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@ -0,0 +1,5 @@
|
||||||
|
# Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
||||||
|
|
||||||
|
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads)
|
||||||
|
|
||||||
|
Loading of OAI specification documents from local or remote locations.
|
|
@ -0,0 +1,264 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package loads
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/go-openapi/analysis"
|
||||||
|
"github.com/go-openapi/spec"
|
||||||
|
"github.com/go-openapi/swag"
|
||||||
|
)
|
||||||
|
|
||||||
|
// JSONDoc loads a json document from either a file or a remote url
|
||||||
|
func JSONDoc(path string) (json.RawMessage, error) {
|
||||||
|
data, err := swag.LoadFromFileOrHTTP(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return json.RawMessage(data), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DocLoader represents a doc loader type
|
||||||
|
type DocLoader func(string) (json.RawMessage, error)
|
||||||
|
|
||||||
|
// DocMatcher represents a predicate to check if a loader matches
|
||||||
|
type DocMatcher func(string) bool
|
||||||
|
|
||||||
|
var (
|
||||||
|
loaders *loader
|
||||||
|
defaultLoader *loader
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
defaultLoader = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc}
|
||||||
|
loaders = defaultLoader
|
||||||
|
spec.PathLoader = loaders.Fn
|
||||||
|
AddLoader(swag.YAMLMatcher, swag.YAMLDoc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddLoader for a document
|
||||||
|
func AddLoader(predicate DocMatcher, load DocLoader) {
|
||||||
|
prev := loaders
|
||||||
|
loaders = &loader{
|
||||||
|
Match: predicate,
|
||||||
|
Fn: load,
|
||||||
|
Next: prev,
|
||||||
|
}
|
||||||
|
spec.PathLoader = loaders.Fn
|
||||||
|
}
|
||||||
|
|
||||||
|
type loader struct {
|
||||||
|
Fn DocLoader
|
||||||
|
Match DocMatcher
|
||||||
|
Next *loader
|
||||||
|
}
|
||||||
|
|
||||||
|
// JSONSpec loads a spec from a json document
|
||||||
|
func JSONSpec(path string) (*Document, error) {
|
||||||
|
data, err := JSONDoc(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// convert to json
|
||||||
|
return Analyzed(json.RawMessage(data), "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Document represents a swagger spec document
|
||||||
|
type Document struct {
|
||||||
|
// specAnalyzer
|
||||||
|
Analyzer *analysis.Spec
|
||||||
|
spec *spec.Swagger
|
||||||
|
specFilePath string
|
||||||
|
origSpec *spec.Swagger
|
||||||
|
schema *spec.Schema
|
||||||
|
raw json.RawMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Spec loads a new spec document
|
||||||
|
func Spec(path string) (*Document, error) {
|
||||||
|
specURL, err := url.Parse(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var lastErr error
|
||||||
|
for l := loaders.Next; l != nil; l = l.Next {
|
||||||
|
if loaders.Match(specURL.Path) {
|
||||||
|
b, err2 := loaders.Fn(path)
|
||||||
|
if err2 != nil {
|
||||||
|
lastErr = err2
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
doc, err := Analyzed(b, "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if doc != nil {
|
||||||
|
doc.specFilePath = path
|
||||||
|
}
|
||||||
|
return doc, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if lastErr != nil {
|
||||||
|
return nil, lastErr
|
||||||
|
}
|
||||||
|
b, err := defaultLoader.Fn(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
document, err := Analyzed(b, "")
|
||||||
|
if document != nil {
|
||||||
|
document.specFilePath = path
|
||||||
|
}
|
||||||
|
|
||||||
|
return document, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Analyzed creates a new analyzed spec document
|
||||||
|
func Analyzed(data json.RawMessage, version string) (*Document, error) {
|
||||||
|
if version == "" {
|
||||||
|
version = "2.0"
|
||||||
|
}
|
||||||
|
if version != "2.0" {
|
||||||
|
return nil, fmt.Errorf("spec version %q is not supported", version)
|
||||||
|
}
|
||||||
|
|
||||||
|
raw := data
|
||||||
|
trimmed := bytes.TrimSpace(data)
|
||||||
|
if len(trimmed) > 0 {
|
||||||
|
if trimmed[0] != '{' && trimmed[0] != '[' {
|
||||||
|
yml, err := swag.BytesToYAMLDoc(trimmed)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("analyzed: %v", err)
|
||||||
|
}
|
||||||
|
d, err := swag.YAMLToJSON(yml)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("analyzed: %v", err)
|
||||||
|
}
|
||||||
|
raw = d
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
swspec := new(spec.Swagger)
|
||||||
|
if err := json.Unmarshal(raw, swspec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
origsqspec := new(spec.Swagger)
|
||||||
|
if err := json.Unmarshal(raw, origsqspec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
d := &Document{
|
||||||
|
Analyzer: analysis.New(swspec),
|
||||||
|
schema: spec.MustLoadSwagger20Schema(),
|
||||||
|
spec: swspec,
|
||||||
|
raw: raw,
|
||||||
|
origSpec: origsqspec,
|
||||||
|
}
|
||||||
|
return d, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Expanded expands the ref fields in the spec document and returns a new spec document
|
||||||
|
func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) {
|
||||||
|
swspec := new(spec.Swagger)
|
||||||
|
if err := json.Unmarshal(d.raw, swspec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var expandOptions *spec.ExpandOptions
|
||||||
|
if len(options) > 0 {
|
||||||
|
expandOptions = options[1]
|
||||||
|
} else {
|
||||||
|
expandOptions = &spec.ExpandOptions{
|
||||||
|
RelativeBase: filepath.Dir(d.specFilePath),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := spec.ExpandSpec(swspec, expandOptions); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
dd := &Document{
|
||||||
|
Analyzer: analysis.New(swspec),
|
||||||
|
spec: swspec,
|
||||||
|
schema: spec.MustLoadSwagger20Schema(),
|
||||||
|
raw: d.raw,
|
||||||
|
origSpec: d.origSpec,
|
||||||
|
}
|
||||||
|
return dd, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BasePath the base path for this spec
|
||||||
|
func (d *Document) BasePath() string {
|
||||||
|
return d.spec.BasePath
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version returns the version of this spec
|
||||||
|
func (d *Document) Version() string {
|
||||||
|
return d.spec.Swagger
|
||||||
|
}
|
||||||
|
|
||||||
|
// Schema returns the swagger 2.0 schema
|
||||||
|
func (d *Document) Schema() *spec.Schema {
|
||||||
|
return d.schema
|
||||||
|
}
|
||||||
|
|
||||||
|
// Spec returns the swagger spec object model
|
||||||
|
func (d *Document) Spec() *spec.Swagger {
|
||||||
|
return d.spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// Host returns the host for the API
|
||||||
|
func (d *Document) Host() string {
|
||||||
|
return d.spec.Host
|
||||||
|
}
|
||||||
|
|
||||||
|
// Raw returns the raw swagger spec as json bytes
|
||||||
|
func (d *Document) Raw() json.RawMessage {
|
||||||
|
return d.raw
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Document) OrigSpec() *spec.Swagger {
|
||||||
|
return d.origSpec
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResetDefinitions gives a shallow copy with the models reset
|
||||||
|
func (d *Document) ResetDefinitions() *Document {
|
||||||
|
defs := make(map[string]spec.Schema, len(d.origSpec.Definitions))
|
||||||
|
for k, v := range d.origSpec.Definitions {
|
||||||
|
defs[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
d.spec.Definitions = defs
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pristine creates a new pristine document instance based on the input data
|
||||||
|
func (d *Document) Pristine() *Document {
|
||||||
|
dd, _ := Analyzed(d.Raw(), d.Version())
|
||||||
|
return dd
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpecFilePath returns the file path of the spec if one is defined
|
||||||
|
func (d *Document) SpecFilePath() string {
|
||||||
|
return d.specFilePath
|
||||||
|
}
|
|
@ -0,0 +1,74 @@
|
||||||
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
In the interest of fostering an open and welcoming environment, we as
|
||||||
|
contributors and maintainers pledge to making participation in our project and
|
||||||
|
our community a harassment-free experience for everyone, regardless of age, body
|
||||||
|
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||||
|
nationality, personal appearance, race, religion, or sexual identity and
|
||||||
|
orientation.
|
||||||
|
|
||||||
|
## Our Standards
|
||||||
|
|
||||||
|
Examples of behavior that contributes to creating a positive environment
|
||||||
|
include:
|
||||||
|
|
||||||
|
* Using welcoming and inclusive language
|
||||||
|
* Being respectful of differing viewpoints and experiences
|
||||||
|
* Gracefully accepting constructive criticism
|
||||||
|
* Focusing on what is best for the community
|
||||||
|
* Showing empathy towards other community members
|
||||||
|
|
||||||
|
Examples of unacceptable behavior by participants include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||||
|
advances
|
||||||
|
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as a physical or electronic
|
||||||
|
address, without explicit permission
|
||||||
|
* Other conduct which could reasonably be considered inappropriate in a
|
||||||
|
professional setting
|
||||||
|
|
||||||
|
## Our Responsibilities
|
||||||
|
|
||||||
|
Project maintainers are responsible for clarifying the standards of acceptable
|
||||||
|
behavior and are expected to take appropriate and fair corrective action in
|
||||||
|
response to any instances of unacceptable behavior.
|
||||||
|
|
||||||
|
Project maintainers have the right and responsibility to remove, edit, or
|
||||||
|
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||||
|
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||||
|
permanently any contributor for other behaviors that they deem inappropriate,
|
||||||
|
threatening, offensive, or harmful.
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
This Code of Conduct applies both within project spaces and in public spaces
|
||||||
|
when an individual is representing the project or its community. Examples of
|
||||||
|
representing a project or community include using an official project e-mail
|
||||||
|
address, posting via an official social media account, or acting as an appointed
|
||||||
|
representative at an online or offline event. Representation of a project may be
|
||||||
|
further defined and clarified by project maintainers.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
|
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
||||||
|
complaints will be reviewed and investigated and will result in a response that
|
||||||
|
is deemed necessary and appropriate to the circumstances. The project team is
|
||||||
|
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||||
|
Further details of specific enforcement policies may be posted separately.
|
||||||
|
|
||||||
|
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||||
|
faith may face temporary or permanent repercussions as determined by other
|
||||||
|
members of the project's leadership.
|
||||||
|
|
||||||
|
## Attribution
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||||
|
available at [http://contributor-covenant.org/version/1/4][version]
|
||||||
|
|
||||||
|
[homepage]: http://contributor-covenant.org
|
||||||
|
[version]: http://contributor-covenant.org/version/1/4/
|
|
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@ -0,0 +1,5 @@
|
||||||
|
# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
||||||
|
|
||||||
|
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt)
|
||||||
|
|
||||||
|
strfmt represents a well known string format such as credit card or email. The go toolkit for open api specifications knows how to deal with those.
|
|
@ -0,0 +1,124 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package strfmt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql/driver"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/mailru/easyjson/jlexer"
|
||||||
|
"github.com/mailru/easyjson/jwriter"
|
||||||
|
|
||||||
|
"gopkg.in/mgo.v2/bson"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
var id ObjectId
|
||||||
|
Default.Add("bsonobjectid", &id, IsBSONObjectID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsBSONObjectID returns true when the string is a valid BSON.ObjectId
|
||||||
|
func IsBSONObjectID(str string) bool {
|
||||||
|
var id bson.ObjectId
|
||||||
|
return id.UnmarshalText([]byte(str)) == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type ObjectId bson.ObjectId
|
||||||
|
|
||||||
|
// NewObjectId creates a ObjectId from a Hex String
|
||||||
|
func NewObjectId(hex string) ObjectId {
|
||||||
|
return ObjectId(bson.ObjectIdHex(hex))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalText turns this instance into text
|
||||||
|
func (id *ObjectId) MarshalText() ([]byte, error) {
|
||||||
|
return []byte(bson.ObjectId(*id).Hex()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalText hydrates this instance from text
|
||||||
|
func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on
|
||||||
|
var rawID bson.ObjectId
|
||||||
|
if err := rawID.UnmarshalText(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*id = ObjectId(rawID)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan read a value from a database driver
|
||||||
|
func (id *ObjectId) Scan(raw interface{}) error {
|
||||||
|
var data []byte
|
||||||
|
switch v := raw.(type) {
|
||||||
|
case []byte:
|
||||||
|
data = v
|
||||||
|
case string:
|
||||||
|
data = []byte(v)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v", v)
|
||||||
|
}
|
||||||
|
|
||||||
|
return id.UnmarshalText(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value converts a value to a database driver value
|
||||||
|
func (id *ObjectId) Value() (driver.Value, error) {
|
||||||
|
return driver.Value(string(*id)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (id *ObjectId) String() string {
|
||||||
|
return string(*id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (id *ObjectId) MarshalJSON() ([]byte, error) {
|
||||||
|
var w jwriter.Writer
|
||||||
|
id.MarshalEasyJSON(&w)
|
||||||
|
return w.BuildBytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (id *ObjectId) MarshalEasyJSON(w *jwriter.Writer) {
|
||||||
|
w.String(bson.ObjectId(*id).Hex())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (id *ObjectId) UnmarshalJSON(data []byte) error {
|
||||||
|
l := jlexer.Lexer{Data: data}
|
||||||
|
id.UnmarshalEasyJSON(&l)
|
||||||
|
return l.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (id *ObjectId) UnmarshalEasyJSON(in *jlexer.Lexer) {
|
||||||
|
if data := in.String(); in.Ok() {
|
||||||
|
*id = NewObjectId(data)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (id *ObjectId) GetBSON() (interface{}, error) {
|
||||||
|
return bson.M{"data": bson.ObjectId(*id).Hex()}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBSON hydrates this ObjectId from a bson raw value.
//
// The raw value is expected to be a document of the shape
// {"data": "<hex string>"}, mirroring what GetBSON produces.
// Returns an error when the raw value cannot be unmarshalled or the
// "data" key is absent or not a string.
func (id *ObjectId) SetBSON(raw bson.Raw) error {
	var m bson.M
	if err := raw.Unmarshal(&m); err != nil {
		return err
	}

	// NewObjectId parses the hex representation written by GetBSON.
	if data, ok := m["data"].(string); ok {
		*id = NewObjectId(data)
		return nil
	}

	return errors.New("couldn't unmarshal bson raw value as ObjectId")
}
|
|
@ -0,0 +1,152 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package strfmt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql/driver"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"gopkg.in/mgo.v2/bson"
|
||||||
|
|
||||||
|
"github.com/mailru/easyjson/jlexer"
|
||||||
|
"github.com/mailru/easyjson/jwriter"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
d := Date{}
|
||||||
|
Default.Add("date", &d, IsDate)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsDate returns true when the string is a valid date
|
||||||
|
func IsDate(str string) bool {
|
||||||
|
matches := rxDate.FindAllStringSubmatch(str, -1)
|
||||||
|
if len(matches) == 0 || len(matches[0]) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
m := matches[0]
|
||||||
|
return !(m[2] < "01" || m[2] > "12" || m[3] < "01" || m[3] > "31")
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// RFC3339FullDate represents a full-date as specified by RFC3339
|
||||||
|
// See: http://goo.gl/xXOvVd
|
||||||
|
RFC3339FullDate = "2006-01-02"
|
||||||
|
// DatePattern pattern to match for the date format from http://tools.ietf.org/html/rfc3339#section-5.6
|
||||||
|
DatePattern = `^([0-9]{4})-([0-9]{2})-([0-9]{2})`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
rxDate = regexp.MustCompile(DatePattern)
|
||||||
|
)
|
||||||
|
|
||||||
|
// Date represents a date from the API
|
||||||
|
//
|
||||||
|
// swagger:strfmt date
|
||||||
|
type Date time.Time
|
||||||
|
|
||||||
|
// String converts this date into a string
|
||||||
|
func (d Date) String() string {
|
||||||
|
return time.Time(d).Format(RFC3339FullDate)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalText parses a text representation into a date type
|
||||||
|
func (d *Date) UnmarshalText(text []byte) error {
|
||||||
|
if len(text) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
dd, err := time.Parse(RFC3339FullDate, string(text))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*d = Date(dd)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalText serializes this date type to string
|
||||||
|
func (d Date) MarshalText() ([]byte, error) {
|
||||||
|
return []byte(d.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan scans a Date value from database driver type.
|
||||||
|
// Scan scans a Date value from database driver type.
//
// Accepted raw types: []byte and string (parsed as RFC3339 full-date via
// UnmarshalText), time.Time (converted directly), and nil (resets to the
// zero Date). Any other type yields an error.
func (d *Date) Scan(raw interface{}) error {
	switch v := raw.(type) {
	case []byte:
		return d.UnmarshalText(v)
	case string:
		return d.UnmarshalText([]byte(v))
	case time.Time:
		*d = Date(v)
		return nil
	case nil:
		// NULL column maps to the zero value.
		*d = Date{}
		return nil
	default:
		return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v)
	}
}
|
||||||
|
|
||||||
|
// Value converts Date to a primitive value ready to written to a database.
|
||||||
|
func (d Date) Value() (driver.Value, error) {
|
||||||
|
return driver.Value(d.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t Date) MarshalJSON() ([]byte, error) {
|
||||||
|
var w jwriter.Writer
|
||||||
|
t.MarshalEasyJSON(&w)
|
||||||
|
return w.BuildBytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t Date) MarshalEasyJSON(w *jwriter.Writer) {
|
||||||
|
w.String(time.Time(t).Format(RFC3339FullDate))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Date) UnmarshalJSON(data []byte) error {
|
||||||
|
l := jlexer.Lexer{Data: data}
|
||||||
|
t.UnmarshalEasyJSON(&l)
|
||||||
|
return l.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Date) UnmarshalEasyJSON(in *jlexer.Lexer) {
|
||||||
|
if data := in.String(); in.Ok() {
|
||||||
|
tt, err := time.Parse(RFC3339FullDate, data)
|
||||||
|
if err != nil {
|
||||||
|
in.AddError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*t = Date(tt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Date) GetBSON() (interface{}, error) {
|
||||||
|
return bson.M{"data": t.String()}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Date) SetBSON(raw bson.Raw) error {
|
||||||
|
var m bson.M
|
||||||
|
if err := raw.Unmarshal(&m); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if data, ok := m["data"].(string); ok {
|
||||||
|
rd, err := time.Parse(RFC3339FullDate, data)
|
||||||
|
*t = Date(rd)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.New("couldn't unmarshal bson raw value as Duration")
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,18 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package strfmt contains custom string formats
|
||||||
|
//
|
||||||
|
// TODO: add info on how to define and register a custom format
|
||||||
|
package strfmt
|
|
@ -0,0 +1,194 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package strfmt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql/driver"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"gopkg.in/mgo.v2/bson"
|
||||||
|
|
||||||
|
"github.com/mailru/easyjson/jlexer"
|
||||||
|
"github.com/mailru/easyjson/jwriter"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
d := Duration(0)
|
||||||
|
Default.Add("duration", &d, IsDuration)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
timeUnits = [][]string{
|
||||||
|
{"ns", "nano"},
|
||||||
|
{"us", "µs", "micro"},
|
||||||
|
{"ms", "milli"},
|
||||||
|
{"s", "sec"},
|
||||||
|
{"m", "min"},
|
||||||
|
{"h", "hr", "hour"},
|
||||||
|
{"d", "day"},
|
||||||
|
{"w", "wk", "week"},
|
||||||
|
}
|
||||||
|
|
||||||
|
timeMultiplier = map[string]time.Duration{
|
||||||
|
"ns": time.Nanosecond,
|
||||||
|
"us": time.Microsecond,
|
||||||
|
"ms": time.Millisecond,
|
||||||
|
"s": time.Second,
|
||||||
|
"m": time.Minute,
|
||||||
|
"h": time.Hour,
|
||||||
|
"d": 24 * time.Hour,
|
||||||
|
"w": 7 * 24 * time.Hour,
|
||||||
|
}
|
||||||
|
|
||||||
|
durationMatcher = regexp.MustCompile(`((\d+)\s*([A-Za-zµ]+))`)
|
||||||
|
)
|
||||||
|
|
||||||
|
// IsDuration returns true if the provided string is a valid duration
|
||||||
|
func IsDuration(str string) bool {
|
||||||
|
_, err := ParseDuration(str)
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Duration represents a duration
|
||||||
|
//
|
||||||
|
// swagger:strfmt duration
|
||||||
|
type Duration time.Duration
|
||||||
|
|
||||||
|
// MarshalText turns this instance into text
|
||||||
|
func (d Duration) MarshalText() ([]byte, error) {
|
||||||
|
return []byte(time.Duration(d).String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalText hydrates this instance from text
|
||||||
|
func (d *Duration) UnmarshalText(data []byte) error { // validation is performed later on
|
||||||
|
dd, err := ParseDuration(string(data))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*d = Duration(dd)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseDuration parses a duration from a string, compatible with scala duration syntax
|
||||||
|
// ParseDuration parses a duration from a string, compatible with scala duration syntax
//
// It first tries Go's native time.ParseDuration; when that fails it scans
// the candidate for "<number><unit>" pairs (e.g. "3 hr", "2w") and sums
// the contributions. Unit names are resolved against timeUnits: the last
// (longest) variant of each unit matches by prefix, the others require an
// exact case-insensitive match. Returns an error when no unit pair matched
// or a factor is not an integer.
func ParseDuration(cand string) (time.Duration, error) {
	// Fast path: standard Go duration syntax ("1h30m", "250ms", ...).
	if dur, err := time.ParseDuration(cand); err == nil {
		return dur, nil
	}

	var dur time.Duration
	// ok tracks whether at least one number/unit pair was recognized.
	ok := false
	for _, match := range durationMatcher.FindAllStringSubmatch(cand, -1) {

		factor, err := strconv.Atoi(match[2]) // converts string to int
		if err != nil {
			return 0, err
		}
		unit := strings.ToLower(strings.TrimSpace(match[3]))

		for _, variants := range timeUnits {
			last := len(variants) - 1
			multiplier := timeMultiplier[variants[0]]

			for i, variant := range variants {
				// The last variant (e.g. "hour") accepts prefix matches
				// such as "hours"; earlier variants must match exactly.
				if (last == i && strings.HasPrefix(unit, variant)) || strings.EqualFold(variant, unit) {
					ok = true
					dur += (time.Duration(factor) * multiplier)
				}
			}
		}
	}

	if ok {
		return dur, nil
	}
	return 0, fmt.Errorf("Unable to parse %s as duration", cand)
}
|
||||||
|
|
||||||
|
// Scan reads a Duration value from database driver type.
|
||||||
|
// Scan reads a Duration value from database driver type.
//
// Accepted raw types: int64 (nanoseconds), float64 (truncated to int64
// nanoseconds), and nil (zero duration). Any other type yields an error.
func (d *Duration) Scan(raw interface{}) error {
	switch v := raw.(type) {
	// TODO: case []byte: // ?
	case int64:
		*d = Duration(v)
	case float64:
		// Truncates the fractional part; presumably drivers deliver
		// whole nanosecond counts here.
		*d = Duration(int64(v))
	case nil:
		*d = Duration(0)
	default:
		return fmt.Errorf("cannot sql.Scan() strfmt.Duration from: %#v", v)
	}

	return nil
}
|
||||||
|
|
||||||
|
// Value converts Duration to a primitive value ready to written to a database.
|
||||||
|
func (d Duration) Value() (driver.Value, error) {
|
||||||
|
return driver.Value(int64(d)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// String converts this duration to a string
|
||||||
|
func (d Duration) String() string {
|
||||||
|
return time.Duration(d).String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d Duration) MarshalJSON() ([]byte, error) {
|
||||||
|
var w jwriter.Writer
|
||||||
|
d.MarshalEasyJSON(&w)
|
||||||
|
return w.BuildBytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d Duration) MarshalEasyJSON(w *jwriter.Writer) {
|
||||||
|
w.String(time.Duration(d).String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Duration) UnmarshalJSON(data []byte) error {
|
||||||
|
l := jlexer.Lexer{Data: data}
|
||||||
|
d.UnmarshalEasyJSON(&l)
|
||||||
|
return l.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Duration) UnmarshalEasyJSON(in *jlexer.Lexer) {
|
||||||
|
if data := in.String(); in.Ok() {
|
||||||
|
tt, err := ParseDuration(data)
|
||||||
|
if err != nil {
|
||||||
|
in.AddError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*d = Duration(tt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Duration) GetBSON() (interface{}, error) {
|
||||||
|
return bson.M{"data": int64(*d)}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Duration) SetBSON(raw bson.Raw) error {
|
||||||
|
var m bson.M
|
||||||
|
if err := raw.Unmarshal(&m); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if data, ok := m["data"].(int64); ok {
|
||||||
|
*d = Duration(data)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.New("couldn't unmarshal bson raw value as Duration")
|
||||||
|
}
|
|
@ -0,0 +1,298 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package strfmt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-openapi/errors"
|
||||||
|
"github.com/mitchellh/mapstructure"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Default is the default formats registry
|
||||||
|
var Default = NewSeededFormats(nil, nil)
|
||||||
|
|
||||||
|
// Validator represents a validator for a string format
|
||||||
|
type Validator func(string) bool
|
||||||
|
|
||||||
|
// Format represents a string format
|
||||||
|
type Format interface {
|
||||||
|
String() string
|
||||||
|
encoding.TextMarshaler
|
||||||
|
encoding.TextUnmarshaler
|
||||||
|
}
|
||||||
|
|
||||||
|
// Registry is a registry of string formats
|
||||||
|
type Registry interface {
|
||||||
|
Add(string, Format, Validator) bool
|
||||||
|
DelByName(string) bool
|
||||||
|
GetType(string) (reflect.Type, bool)
|
||||||
|
ContainsName(string) bool
|
||||||
|
Validates(string, string) bool
|
||||||
|
Parse(string, string) (interface{}, error)
|
||||||
|
MapStructureHookFunc() mapstructure.DecodeHookFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
type knownFormat struct {
|
||||||
|
Name string
|
||||||
|
OrigName string
|
||||||
|
Type reflect.Type
|
||||||
|
Validator Validator
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameNormalizer is a function that normalizes a format name
|
||||||
|
type NameNormalizer func(string) string
|
||||||
|
|
||||||
|
// DefaultNameNormalizer removes all dashes
|
||||||
|
// DefaultNameNormalizer normalizes a format name by stripping every
// dash, so e.g. "date-time" becomes "datetime".
func DefaultNameNormalizer(name string) string {
	return strings.Join(strings.Split(name, "-"), "")
}
|
||||||
|
|
||||||
|
type defaultFormats struct {
|
||||||
|
sync.Mutex
|
||||||
|
data []knownFormat
|
||||||
|
normalizeName NameNormalizer
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFormats creates a new formats registry seeded with the values from the default
|
||||||
|
func NewFormats() Registry {
|
||||||
|
return NewSeededFormats(Default.(*defaultFormats).data, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSeededFormats creates a new formats registry
|
||||||
|
// NewSeededFormats creates a new formats registry
//
// The registry starts out with a copy of the given seed formats; the
// seeds slice itself is never mutated. A nil normalizer falls back to
// DefaultNameNormalizer.
func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry {
	if normalizer == nil {
		normalizer = DefaultNameNormalizer
	}
	// copy here, don't modify original
	d := append([]knownFormat(nil), seeds...)
	return &defaultFormats{
		data:          d,
		normalizeName: normalizer,
	}
}
|
||||||
|
|
||||||
|
// MapStructureHookFunc is a decode hook function for mapstructure
|
||||||
|
func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc {
|
||||||
|
return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
|
||||||
|
if from.Kind() != reflect.String {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
for _, v := range f.data {
|
||||||
|
tpe, _ := f.GetType(v.Name)
|
||||||
|
if to == tpe {
|
||||||
|
switch v.Name {
|
||||||
|
case "date":
|
||||||
|
d, err := time.Parse(RFC3339FullDate, data.(string))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return Date(d), nil
|
||||||
|
case "datetime":
|
||||||
|
return ParseDateTime(data.(string))
|
||||||
|
case "duration":
|
||||||
|
dur, err := ParseDuration(data.(string))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return Duration(dur), nil
|
||||||
|
case "uri":
|
||||||
|
return URI(data.(string)), nil
|
||||||
|
case "email":
|
||||||
|
return Email(data.(string)), nil
|
||||||
|
case "uuid":
|
||||||
|
return UUID(data.(string)), nil
|
||||||
|
case "uuid3":
|
||||||
|
return UUID3(data.(string)), nil
|
||||||
|
case "uuid4":
|
||||||
|
return UUID4(data.(string)), nil
|
||||||
|
case "uuid5":
|
||||||
|
return UUID5(data.(string)), nil
|
||||||
|
case "hostname":
|
||||||
|
return Hostname(data.(string)), nil
|
||||||
|
case "ipv4":
|
||||||
|
return IPv4(data.(string)), nil
|
||||||
|
case "ipv6":
|
||||||
|
return IPv6(data.(string)), nil
|
||||||
|
case "mac":
|
||||||
|
return MAC(data.(string)), nil
|
||||||
|
case "isbn":
|
||||||
|
return ISBN(data.(string)), nil
|
||||||
|
case "isbn10":
|
||||||
|
return ISBN10(data.(string)), nil
|
||||||
|
case "isbn13":
|
||||||
|
return ISBN13(data.(string)), nil
|
||||||
|
case "creditcard":
|
||||||
|
return CreditCard(data.(string)), nil
|
||||||
|
case "ssn":
|
||||||
|
return SSN(data.(string)), nil
|
||||||
|
case "hexcolor":
|
||||||
|
return HexColor(data.(string)), nil
|
||||||
|
case "rgbcolor":
|
||||||
|
return RGBColor(data.(string)), nil
|
||||||
|
case "byte":
|
||||||
|
return Base64(data.(string)), nil
|
||||||
|
case "password":
|
||||||
|
return Password(data.(string)), nil
|
||||||
|
default:
|
||||||
|
return nil, errors.InvalidTypeName(v.Name)
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds a new format, return true if this was a new item instead of a replacement
|
||||||
|
// Add adds a new format, return true if this was a new item instead of a replacement
//
// The name is normalized before lookup; when an entry with the same
// normalized name already exists its type and validator are replaced in
// place and false is returned. The registry lock is held for the whole
// operation.
func (f *defaultFormats) Add(name string, strfmt Format, validator Validator) bool {
	f.Lock()
	defer f.Unlock()

	nme := f.normalizeName(name)

	// Store the element type so pointer and value registrations collapse
	// to the same entry.
	tpe := reflect.TypeOf(strfmt)
	if tpe.Kind() == reflect.Ptr {
		tpe = tpe.Elem()
	}

	for i := range f.data {
		v := &f.data[i]
		if v.Name == nme {
			v.Type = tpe
			v.Validator = validator
			return false
		}
	}

	// turns out it's new after all
	f.data = append(f.data, knownFormat{Name: nme, OrigName: name, Type: tpe, Validator: validator})
	return true
}
|
||||||
|
|
||||||
|
// GetType gets the type for the specified name
|
||||||
|
func (f *defaultFormats) GetType(name string) (reflect.Type, bool) {
|
||||||
|
f.Lock()
|
||||||
|
defer f.Unlock()
|
||||||
|
nme := f.normalizeName(name)
|
||||||
|
for _, v := range f.data {
|
||||||
|
if v.Name == nme {
|
||||||
|
return v.Type, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// DelByName removes the format by the specified name, returns true when an item was actually removed
|
||||||
|
func (f *defaultFormats) DelByName(name string) bool {
|
||||||
|
f.Lock()
|
||||||
|
defer f.Unlock()
|
||||||
|
|
||||||
|
nme := f.normalizeName(name)
|
||||||
|
|
||||||
|
for i, v := range f.data {
|
||||||
|
if v.Name == nme {
|
||||||
|
f.data[i] = knownFormat{} // release
|
||||||
|
f.data = append(f.data[:i], f.data[i+1:]...)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// DelByType removes the specified format, returns true when an item was actually removed
|
||||||
|
func (f *defaultFormats) DelByFormat(strfmt Format) bool {
|
||||||
|
f.Lock()
|
||||||
|
defer f.Unlock()
|
||||||
|
|
||||||
|
tpe := reflect.TypeOf(strfmt)
|
||||||
|
if tpe.Kind() == reflect.Ptr {
|
||||||
|
tpe = tpe.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, v := range f.data {
|
||||||
|
if v.Type == tpe {
|
||||||
|
f.data[i] = knownFormat{} // release
|
||||||
|
f.data = append(f.data[:i], f.data[i+1:]...)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainsName returns true if this registry contains the specified name
|
||||||
|
func (f *defaultFormats) ContainsName(name string) bool {
|
||||||
|
f.Lock()
|
||||||
|
defer f.Unlock()
|
||||||
|
nme := f.normalizeName(name)
|
||||||
|
for _, v := range f.data {
|
||||||
|
if v.Name == nme {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainsFormat returns true if this registry contains the specified format
|
||||||
|
func (f *defaultFormats) ContainsFormat(strfmt Format) bool {
|
||||||
|
f.Lock()
|
||||||
|
defer f.Unlock()
|
||||||
|
tpe := reflect.TypeOf(strfmt)
|
||||||
|
if tpe.Kind() == reflect.Ptr {
|
||||||
|
tpe = tpe.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range f.data {
|
||||||
|
if v.Type == tpe {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *defaultFormats) Validates(name, data string) bool {
|
||||||
|
f.Lock()
|
||||||
|
defer f.Unlock()
|
||||||
|
nme := f.normalizeName(name)
|
||||||
|
for _, v := range f.data {
|
||||||
|
if v.Name == nme {
|
||||||
|
return v.Validator(data)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse instantiates the registered format with the given name from its
// textual representation.
//
// On success it returns a pointer to a freshly allocated value of the
// format's type, hydrated via encoding.TextUnmarshaler. An unknown name,
// or a registered type that does not implement TextUnmarshaler, yields
// an InvalidTypeName error.
func (f *defaultFormats) Parse(name, data string) (interface{}, error) {
	f.Lock()
	defer f.Unlock()
	nme := f.normalizeName(name)
	for _, v := range f.data {
		if v.Name == nme {
			// reflect.New returns a *T, which is what the pointer-receiver
			// UnmarshalText implementations require.
			nw := reflect.New(v.Type).Interface()
			if dec, ok := nw.(encoding.TextUnmarshaler); ok {
				if err := dec.UnmarshalText([]byte(data)); err != nil {
					return nil, err
				}
				return nw, nil
			}
			return nil, errors.InvalidTypeName(name)
		}
	}
	return nil, errors.InvalidTypeName(name)
}
|
|
@ -0,0 +1,185 @@
|
||||||
|
// Copyright 2015 go-swagger maintainers
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package strfmt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql/driver"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"gopkg.in/mgo.v2/bson"
|
||||||
|
|
||||||
|
"github.com/mailru/easyjson/jlexer"
|
||||||
|
"github.com/mailru/easyjson/jwriter"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
dt := DateTime{}
|
||||||
|
Default.Add("datetime", &dt, IsDateTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsDateTime returns true when the string is a valid date-time
|
||||||
|
// IsDateTime returns true when the string is a valid date-time
//
// The candidate is split (case-insensitively) on "t" into a date part and
// a time part; the date part must satisfy IsDate and the time part must
// match rxDateTime with hour <= 23, minute <= 59 and second <= 59
// (lexicographic comparison on the two-digit capture groups).
func IsDateTime(str string) bool {
	if len(str) < 4 {
		return false
	}
	// NOTE(review): splitting on every "t" assumes the date part contains
	// no "t"; valid RFC3339 dates never do, so s[0]/s[1] are the two halves.
	s := strings.Split(strings.ToLower(str), "t")
	if len(s) < 2 || !IsDate(s[0]) {
		return false
	}

	matches := rxDateTime.FindAllStringSubmatch(s[1], -1)
	if len(matches) == 0 || len(matches[0]) == 0 {
		return false
	}
	m := matches[0]
	res := m[1] <= "23" && m[2] <= "59" && m[3] <= "59"
	return res
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// RFC3339Millis represents a ISO8601 format to millis instead of to nanos
|
||||||
|
RFC3339Millis = "2006-01-02T15:04:05.000Z07:00"
|
||||||
|
// DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6
|
||||||
|
DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
dateTimeFormats = []string{RFC3339Millis, time.RFC3339, time.RFC3339Nano}
|
||||||
|
rxDateTime = regexp.MustCompile(DateTimePattern)
|
||||||
|
)
|
||||||
|
|
||||||
|
// ParseDateTime parses a string that represents an ISO8601 time or a unix epoch
|
||||||
|
// ParseDateTime parses a string that represents an ISO8601 time or a unix epoch
//
// An empty string returns the zero value (NewDateTime, the UTC unix
// epoch) without error. Otherwise each layout in dateTimeFormats is
// tried in order and the first successful parse wins; when none match,
// the error from the last attempted layout is returned.
func ParseDateTime(data string) (DateTime, error) {
	if data == "" {
		return NewDateTime(), nil
	}
	var lastError error
	for _, layout := range dateTimeFormats {
		dd, err := time.Parse(layout, data)
		if err != nil {
			lastError = err
			continue
		}
		lastError = nil
		return DateTime(dd), nil
	}
	return DateTime{}, lastError
}
|
||||||
|
|
||||||
|
// DateTime is a time but it serializes to ISO8601 format with millis
|
||||||
|
// It knows how to read 3 different variations of a RFC3339 date time.
|
||||||
|
// Most APIs we encounter want either millisecond or second precision times.
|
||||||
|
// This just tries to make it worry-free.
|
||||||
|
//
|
||||||
|
// swagger:strfmt date-time
|
||||||
|
type DateTime time.Time
|
||||||
|
|
||||||
|
// NewDateTime is a representation of zero value for DateTime type
|
||||||
|
func NewDateTime() DateTime {
|
||||||
|
return DateTime(time.Unix(0, 0).UTC())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t DateTime) String() string {
|
||||||
|
return time.Time(t).Format(RFC3339Millis)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalText implements the text marshaller interface
|
||||||
|
func (t DateTime) MarshalText() ([]byte, error) {
|
||||||
|
return []byte(t.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalText implements the text unmarshaller interface
|
||||||
|
func (t *DateTime) UnmarshalText(text []byte) error {
|
||||||
|
tt, err := ParseDateTime(string(text))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*t = tt
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan scans a DateTime value from database driver type.
|
||||||
|
func (t *DateTime) Scan(raw interface{}) error {
|
||||||
|
// TODO: case int64: and case float64: ?
|
||||||
|
switch v := raw.(type) {
|
||||||
|
case []byte:
|
||||||
|
return t.UnmarshalText(v)
|
||||||
|
case string:
|
||||||
|
return t.UnmarshalText([]byte(v))
|
||||||
|
case time.Time:
|
||||||
|
*t = DateTime(v)
|
||||||
|
case nil:
|
||||||
|
*t = DateTime{}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("cannot sql.Scan() strfmt.DateTime from: %#v", v)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value converts DateTime to a primitive value ready to written to a database.
|
||||||
|
func (t DateTime) Value() (driver.Value, error) {
|
||||||
|
return driver.Value(t.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t DateTime) MarshalJSON() ([]byte, error) {
|
||||||
|
var w jwriter.Writer
|
||||||
|
t.MarshalEasyJSON(&w)
|
||||||
|
return w.BuildBytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t DateTime) MarshalEasyJSON(w *jwriter.Writer) {
|
||||||
|
w.String(time.Time(t).Format(RFC3339Millis))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *DateTime) UnmarshalJSON(data []byte) error {
|
||||||
|
l := jlexer.Lexer{Data: data}
|
||||||
|
t.UnmarshalEasyJSON(&l)
|
||||||
|
return l.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *DateTime) UnmarshalEasyJSON(in *jlexer.Lexer) {
|
||||||
|
if data := in.String(); in.Ok() {
|
||||||
|
tt, err := ParseDateTime(data)
|
||||||
|
if err != nil {
|
||||||
|
in.AddError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*t = tt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *DateTime) GetBSON() (interface{}, error) {
|
||||||
|
return bson.M{"data": t.String()}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *DateTime) SetBSON(raw bson.Raw) error {
|
||||||
|
var m bson.M
|
||||||
|
if err := raw.Unmarshal(&m); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if data, ok := m["data"].(string); ok {
|
||||||
|
var err error
|
||||||
|
*t, err = ParseDateTime(data)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.New("couldn't unmarshal bson raw value as Duration")
|
||||||
|
}
|
|
@ -0,0 +1,25 @@
|
||||||
|
mgo - MongoDB driver for Go
|
||||||
|
|
||||||
|
Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||||
|
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are met:
|
||||||
|
|
||||||
|
1. Redistributions of source code must retain the above copyright notice, this
|
||||||
|
list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer in the documentation
|
||||||
|
and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||||
|
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||||
|
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||||
|
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||||
|
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,25 @@
|
||||||
|
BSON library for Go
|
||||||
|
|
||||||
|
Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||||
|
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are met:
|
||||||
|
|
||||||
|
1. Redistributions of source code must retain the above copyright notice, this
|
||||||
|
list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer in the documentation
|
||||||
|
and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||||
|
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||||
|
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||||
|
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||||
|
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,738 @@
|
||||||
|
// BSON library for Go
|
||||||
|
//
|
||||||
|
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||||
|
//
|
||||||
|
// All rights reserved.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are met:
|
||||||
|
//
|
||||||
|
// 1. Redistributions of source code must retain the above copyright notice, this
|
||||||
|
// list of conditions and the following disclaimer.
|
||||||
|
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer in the documentation
|
||||||
|
// and/or other materials provided with the distribution.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||||
|
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||||
|
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||||
|
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||||
|
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
// Package bson is an implementation of the BSON specification for Go:
|
||||||
|
//
|
||||||
|
// http://bsonspec.org
|
||||||
|
//
|
||||||
|
// It was created as part of the mgo MongoDB driver for Go, but is standalone
|
||||||
|
// and may be used on its own without the driver.
|
||||||
|
package bson
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/md5"
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/binary"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
|
||||||
|
// The public API.
|
||||||
|
|
||||||
|
// A value implementing the bson.Getter interface will have its GetBSON
|
||||||
|
// method called when the given value has to be marshalled, and the result
|
||||||
|
// of this method will be marshaled in place of the actual object.
|
||||||
|
//
|
||||||
|
// If GetBSON returns return a non-nil error, the marshalling procedure
|
||||||
|
// will stop and error out with the provided value.
|
||||||
|
type Getter interface {
|
||||||
|
GetBSON() (interface{}, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A value implementing the bson.Setter interface will receive the BSON
|
||||||
|
// value via the SetBSON method during unmarshaling, and the object
|
||||||
|
// itself will not be changed as usual.
|
||||||
|
//
|
||||||
|
// If setting the value works, the method should return nil or alternatively
|
||||||
|
// bson.SetZero to set the respective field to its zero value (nil for
|
||||||
|
// pointer types). If SetBSON returns a value of type bson.TypeError, the
|
||||||
|
// BSON value will be omitted from a map or slice being decoded and the
|
||||||
|
// unmarshalling will continue. If it returns any other non-nil error, the
|
||||||
|
// unmarshalling procedure will stop and error out with the provided value.
|
||||||
|
//
|
||||||
|
// This interface is generally useful in pointer receivers, since the method
|
||||||
|
// will want to change the receiver. A type field that implements the Setter
|
||||||
|
// interface doesn't have to be a pointer, though.
|
||||||
|
//
|
||||||
|
// Unlike the usual behavior, unmarshalling onto a value that implements a
|
||||||
|
// Setter interface will NOT reset the value to its zero state. This allows
|
||||||
|
// the value to decide by itself how to be unmarshalled.
|
||||||
|
//
|
||||||
|
// For example:
|
||||||
|
//
|
||||||
|
// type MyString string
|
||||||
|
//
|
||||||
|
// func (s *MyString) SetBSON(raw bson.Raw) error {
|
||||||
|
// return raw.Unmarshal(s)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
type Setter interface {
|
||||||
|
SetBSON(raw Raw) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetZero may be returned from a SetBSON method to have the value set to
|
||||||
|
// its respective zero value. When used in pointer values, this will set the
|
||||||
|
// field to nil rather than to the pre-allocated value.
|
||||||
|
var SetZero = errors.New("set to zero")
|
||||||
|
|
||||||
|
// M is a convenient alias for a map[string]interface{} map, useful for
|
||||||
|
// dealing with BSON in a native way. For instance:
|
||||||
|
//
|
||||||
|
// bson.M{"a": 1, "b": true}
|
||||||
|
//
|
||||||
|
// There's no special handling for this type in addition to what's done anyway
|
||||||
|
// for an equivalent map type. Elements in the map will be dumped in an
|
||||||
|
// undefined ordered. See also the bson.D type for an ordered alternative.
|
||||||
|
type M map[string]interface{}
|
||||||
|
|
||||||
|
// D represents a BSON document containing ordered elements. For example:
|
||||||
|
//
|
||||||
|
// bson.D{{"a", 1}, {"b", true}}
|
||||||
|
//
|
||||||
|
// In some situations, such as when creating indexes for MongoDB, the order in
|
||||||
|
// which the elements are defined is important. If the order is not important,
|
||||||
|
// using a map is generally more comfortable. See bson.M and bson.RawD.
|
||||||
|
type D []DocElem
|
||||||
|
|
||||||
|
// DocElem is an element of the bson.D document representation.
|
||||||
|
type DocElem struct {
|
||||||
|
Name string
|
||||||
|
Value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map returns a map out of the ordered element name/value pairs in d.
|
||||||
|
func (d D) Map() (m M) {
|
||||||
|
m = make(M, len(d))
|
||||||
|
for _, item := range d {
|
||||||
|
m[item.Name] = item.Value
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// The Raw type represents raw unprocessed BSON documents and elements.
|
||||||
|
// Kind is the kind of element as defined per the BSON specification, and
|
||||||
|
// Data is the raw unprocessed data for the respective element.
|
||||||
|
// Using this type it is possible to unmarshal or marshal values partially.
|
||||||
|
//
|
||||||
|
// Relevant documentation:
|
||||||
|
//
|
||||||
|
// http://bsonspec.org/#/specification
|
||||||
|
//
|
||||||
|
type Raw struct {
|
||||||
|
Kind byte
|
||||||
|
Data []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// RawD represents a BSON document containing raw unprocessed elements.
|
||||||
|
// This low-level representation may be useful when lazily processing
|
||||||
|
// documents of uncertain content, or when manipulating the raw content
|
||||||
|
// documents in general.
|
||||||
|
type RawD []RawDocElem
|
||||||
|
|
||||||
|
// See the RawD type.
|
||||||
|
type RawDocElem struct {
|
||||||
|
Name string
|
||||||
|
Value Raw
|
||||||
|
}
|
||||||
|
|
||||||
|
// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
|
||||||
|
// long. MongoDB objects by default have such a property set in their "_id"
|
||||||
|
// property.
|
||||||
|
//
|
||||||
|
// http://www.mongodb.org/display/DOCS/Object+IDs
|
||||||
|
type ObjectId string
|
||||||
|
|
||||||
|
// ObjectIdHex returns an ObjectId from the provided hex representation.
|
||||||
|
// Calling this function with an invalid hex representation will
|
||||||
|
// cause a runtime panic. See the IsObjectIdHex function.
|
||||||
|
func ObjectIdHex(s string) ObjectId {
|
||||||
|
d, err := hex.DecodeString(s)
|
||||||
|
if err != nil || len(d) != 12 {
|
||||||
|
panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s))
|
||||||
|
}
|
||||||
|
return ObjectId(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsObjectIdHex returns whether s is a valid hex representation of
// an ObjectId. See the ObjectIdHex function.
func IsObjectIdHex(s string) bool {
	// A 12-byte id is always 24 hex characters.
	const hexLen = 24
	if len(s) != hexLen {
		return false
	}
	if _, err := hex.DecodeString(s); err != nil {
		return false
	}
	return true
}
|
||||||
|
|
||||||
|
// objectIdCounter is atomically incremented when generating a new ObjectId
|
||||||
|
// using NewObjectId() function. It's used as a counter part of an id.
|
||||||
|
var objectIdCounter uint32 = readRandomUint32()
|
||||||
|
|
||||||
|
// readRandomUint32 returns a random objectIdCounter seed read from the
// system CSPRNG. It panics if the random source cannot be read, since
// an unseeded counter would risk ObjectId collisions.
func readRandomUint32() uint32 {
	var b [4]byte
	if _, err := io.ReadFull(rand.Reader, b[:]); err != nil {
		panic(fmt.Errorf("cannot read random object id: %v", err))
	}
	// Same byte order as the original hand-rolled shift chain
	// (b[0] is the least-significant byte, i.e. little-endian).
	return binary.LittleEndian.Uint32(b[:])
}
|
||||||
|
|
||||||
|
// machineId stores machine id generated once and used in subsequent calls
|
||||||
|
// to NewObjectId function.
|
||||||
|
var machineId = readMachineId()
|
||||||
|
var processId = os.Getpid()
|
||||||
|
|
||||||
|
// readMachineId generates and returns a 3-byte machine id derived from
// the md5 of the hostname. When the hostname is unavailable it falls
// back to random bytes, and panics only if that also fails.
func readMachineId() []byte {
	var sum [3]byte
	id := sum[:]
	hostname, err1 := os.Hostname()
	if err1 != nil {
		if _, err2 := io.ReadFull(rand.Reader, id); err2 != nil {
			panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
		}
		return id
	}
	digest := md5.Sum([]byte(hostname))
	copy(id, digest[:])
	return id
}
|
||||||
|
|
||||||
|
// NewObjectId returns a new unique ObjectId.
//
// The 12-byte layout is: 4-byte timestamp, 3-byte machine id,
// 2-byte pid, 3-byte atomically incremented counter.
func NewObjectId() ObjectId {
	var b [12]byte
	// Timestamp, 4 bytes, big endian
	binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
	// Machine, first 3 bytes of md5(hostname)
	b[4] = machineId[0]
	b[5] = machineId[1]
	b[6] = machineId[2]
	// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
	b[7] = byte(processId >> 8)
	b[8] = byte(processId)
	// Increment, 3 bytes, big endian
	i := atomic.AddUint32(&objectIdCounter, 1)
	b[9] = byte(i >> 16)
	b[10] = byte(i >> 8)
	b[11] = byte(i)
	return ObjectId(b[:])
}
|
||||||
|
|
||||||
|
// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
|
||||||
|
// with the provided number of seconds from epoch UTC, and all other parts
|
||||||
|
// filled with zeroes. It's not safe to insert a document with an id generated
|
||||||
|
// by this method, it is useful only for queries to find documents with ids
|
||||||
|
// generated before or after the specified timestamp.
|
||||||
|
func NewObjectIdWithTime(t time.Time) ObjectId {
|
||||||
|
var b [12]byte
|
||||||
|
binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
|
||||||
|
return ObjectId(string(b[:]))
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a hex string representation of the id.
|
||||||
|
// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
|
||||||
|
func (id ObjectId) String() string {
|
||||||
|
return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hex returns a hex representation of the ObjectId.
|
||||||
|
func (id ObjectId) Hex() string {
|
||||||
|
return hex.EncodeToString([]byte(id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON turns a bson.ObjectId into a json.Marshaller.
|
||||||
|
func (id ObjectId) MarshalJSON() ([]byte, error) {
|
||||||
|
return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var nullBytes = []byte("null")
|
||||||
|
|
||||||
|
// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller.
|
||||||
|
func (id *ObjectId) UnmarshalJSON(data []byte) error {
|
||||||
|
if len(data) > 0 && (data[0] == '{' || data[0] == 'O') {
|
||||||
|
var v struct {
|
||||||
|
Id json.RawMessage `json:"$oid"`
|
||||||
|
Func struct {
|
||||||
|
Id json.RawMessage
|
||||||
|
} `json:"$oidFunc"`
|
||||||
|
}
|
||||||
|
err := jdec(data, &v)
|
||||||
|
if err == nil {
|
||||||
|
if len(v.Id) > 0 {
|
||||||
|
data = []byte(v.Id)
|
||||||
|
} else {
|
||||||
|
data = []byte(v.Func.Id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
|
||||||
|
*id = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if len(data) != 26 || data[0] != '"' || data[25] != '"' {
|
||||||
|
return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s", string(data)))
|
||||||
|
}
|
||||||
|
var buf [12]byte
|
||||||
|
_, err := hex.Decode(buf[:], data[1:25])
|
||||||
|
if err != nil {
|
||||||
|
return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s (%s)", string(data), err))
|
||||||
|
}
|
||||||
|
*id = ObjectId(string(buf[:]))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalText turns bson.ObjectId into an encoding.TextMarshaler.
|
||||||
|
func (id ObjectId) MarshalText() ([]byte, error) {
|
||||||
|
return []byte(fmt.Sprintf("%x", string(id))), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler.
|
||||||
|
func (id *ObjectId) UnmarshalText(data []byte) error {
|
||||||
|
if len(data) == 1 && data[0] == ' ' || len(data) == 0 {
|
||||||
|
*id = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if len(data) != 24 {
|
||||||
|
return fmt.Errorf("invalid ObjectId: %s", data)
|
||||||
|
}
|
||||||
|
var buf [12]byte
|
||||||
|
_, err := hex.Decode(buf[:], data[:])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid ObjectId: %s (%s)", data, err)
|
||||||
|
}
|
||||||
|
*id = ObjectId(string(buf[:]))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
|
||||||
|
func (id ObjectId) Valid() bool {
|
||||||
|
return len(id) == 12
|
||||||
|
}
|
||||||
|
|
||||||
|
// byteSlice returns byte slice of id from start to end.
|
||||||
|
// Calling this function with an invalid id will cause a runtime panic.
|
||||||
|
func (id ObjectId) byteSlice(start, end int) []byte {
|
||||||
|
if len(id) != 12 {
|
||||||
|
panic(fmt.Sprintf("invalid ObjectId: %q", string(id)))
|
||||||
|
}
|
||||||
|
return []byte(string(id)[start:end])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Time returns the timestamp part of the id.
|
||||||
|
// It's a runtime error to call this method with an invalid id.
|
||||||
|
func (id ObjectId) Time() time.Time {
|
||||||
|
// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
|
||||||
|
secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
|
||||||
|
return time.Unix(secs, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Machine returns the 3-byte machine id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Machine() []byte {
	// Bytes 4-6 carry the first 3 bytes of md5(hostname); see NewObjectId.
	return id.byteSlice(4, 7)
}
|
||||||
|
|
||||||
|
// Pid returns the process id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Pid() uint16 {
	// Bytes 7-8 carry the pid in big-endian order; see NewObjectId.
	return binary.BigEndian.Uint16(id.byteSlice(7, 9))
}
|
||||||
|
|
||||||
|
// Counter returns the incrementing value part of the id.
|
||||||
|
// It's a runtime error to call this method with an invalid id.
|
||||||
|
func (id ObjectId) Counter() int32 {
|
||||||
|
b := id.byteSlice(9, 12)
|
||||||
|
// Counter is stored as big-endian 3-byte value
|
||||||
|
return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
|
||||||
|
}
|
||||||
|
|
||||||
|
// The Symbol type is similar to a string and is used in languages with a
|
||||||
|
// distinct symbol type.
|
||||||
|
type Symbol string
|
||||||
|
|
||||||
|
// Now returns the current time with millisecond precision. MongoDB stores
// timestamps with the same precision, so a Time returned from this method
// will not change after a roundtrip to the database. That's the only reason
// why this function exists. Using the time.Now function also works fine
// otherwise.
func Now() time.Time {
	ns := time.Now().UnixNano()
	// Truncate the nanosecond count to whole milliseconds.
	return time.Unix(0, ns/1e6*1e6)
}
|
||||||
|
|
||||||
|
// MongoTimestamp is a special internal type used by MongoDB that for some
|
||||||
|
// strange reason has its own datatype defined in BSON.
|
||||||
|
type MongoTimestamp int64
|
||||||
|
|
||||||
|
type orderKey int64
|
||||||
|
|
||||||
|
// MaxKey is a special value that compares higher than all other possible BSON
|
||||||
|
// values in a MongoDB database.
|
||||||
|
var MaxKey = orderKey(1<<63 - 1)
|
||||||
|
|
||||||
|
// MinKey is a special value that compares lower than all other possible BSON
|
||||||
|
// values in a MongoDB database.
|
||||||
|
var MinKey = orderKey(-1 << 63)
|
||||||
|
|
||||||
|
type undefined struct{}
|
||||||
|
|
||||||
|
// Undefined represents the undefined BSON value.
|
||||||
|
var Undefined undefined
|
||||||
|
|
||||||
|
// Binary is a representation for non-standard binary values. Any kind should
|
||||||
|
// work, but the following are known as of this writing:
|
||||||
|
//
|
||||||
|
// 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
|
||||||
|
// 0x01 - Function (!?)
|
||||||
|
// 0x02 - Obsolete generic.
|
||||||
|
// 0x03 - UUID
|
||||||
|
// 0x05 - MD5
|
||||||
|
// 0x80 - User defined.
|
||||||
|
//
|
||||||
|
type Binary struct {
|
||||||
|
Kind byte
|
||||||
|
Data []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegEx represents a regular expression. The Options field may contain
|
||||||
|
// individual characters defining the way in which the pattern should be
|
||||||
|
// applied, and must be sorted. Valid options as of this writing are 'i' for
|
||||||
|
// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
|
||||||
|
// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
|
||||||
|
// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
|
||||||
|
// unicode. The value of the Options parameter is not verified before being
|
||||||
|
// marshaled into the BSON format.
|
||||||
|
type RegEx struct {
|
||||||
|
Pattern string
|
||||||
|
Options string
|
||||||
|
}
|
||||||
|
|
||||||
|
// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
|
||||||
|
// will be marshaled as a mapping from identifiers to values that may be
|
||||||
|
// used when evaluating the provided Code.
|
||||||
|
type JavaScript struct {
|
||||||
|
Code string
|
||||||
|
Scope interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DBPointer refers to a document id in a namespace.
|
||||||
|
//
|
||||||
|
// This type is deprecated in the BSON specification and should not be used
|
||||||
|
// except for backwards compatibility with ancient applications.
|
||||||
|
type DBPointer struct {
|
||||||
|
Namespace string
|
||||||
|
Id ObjectId
|
||||||
|
}
|
||||||
|
|
||||||
|
const initialBufferSize = 64
|
||||||
|
|
||||||
|
// handleErr is a deferred helper used by the marshalling entry points.
// It converts a recovered panic into an error assigned through err,
// while re-raising panics that must not be swallowed.
func handleErr(err *error) {
	if r := recover(); r != nil {
		if _, ok := r.(runtime.Error); ok {
			// Runtime errors (nil deref, out-of-range, ...) indicate
			// programmer bugs: propagate the panic.
			panic(r)
		} else if _, ok := r.(externalPanic); ok {
			// Panics explicitly tagged as external are also re-raised.
			panic(r)
		} else if s, ok := r.(string); ok {
			*err = errors.New(s)
		} else if e, ok := r.(error); ok {
			*err = e
		} else {
			// Unknown panic payload: re-raise rather than hide it.
			panic(r)
		}
	}
}
|
||||||
|
|
||||||
|
// Marshal serializes the in value, which may be a map or a struct value.
// In the case of struct values, only exported fields will be serialized,
// and the order of serialized fields will match that of the struct itself.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
//     "[<key>][,<flag1>[,<flag2>]]"
//
//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
//     omitempty  Only include the field if it's not set to the zero
//                value for the type or to empty slices or maps.
//
//     minsize    Marshal an int64 value as an int32, if that's feasible
//                while preserving the numeric value.
//
//     inline     Inline the field, which must be a struct or a map,
//                causing all of its fields or keys to be processed as if
//                they were part of the outer struct. For maps, keys must
//                not conflict with the bson keys of other struct fields.
//
// Some examples:
//
//     type T struct {
//         A bool
//         B int    "myb"
//         C string "myc,omitempty"
//         D string `bson:",omitempty" json:"jsonkey"`
//         E int64  ",minsize"
//         F int64  "myf,omitempty,minsize"
//     }
//
func Marshal(in interface{}) (out []byte, err error) {
	// The encoder panics on unsupported input; handleErr converts
	// that panic into the returned err.
	defer handleErr(&err)
	e := &encoder{make([]byte, 0, initialBufferSize)}
	e.addDoc(reflect.ValueOf(in))
	return e.out, nil
}
|
||||||
|
|
||||||
|
// Unmarshal deserializes data from in into the out value. The out value
// must be a map, a pointer to a struct, or a pointer to a bson.D value.
// In the case of struct values, only exported fields will be deserialized.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
//     "[<key>][,<flag1>[,<flag2>]]"
//
//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported during unmarshal (see the
// Marshal method for other flags):
//
//     inline     Inline the field, which must be a struct or a map.
//                Inlined structs are handled as if its fields were part
//                of the outer struct. An inlined map causes keys that do
//                not match any other struct field to be inserted in the
//                map rather than being discarded as usual.
//
// The target field or element types of out may not necessarily match
// the BSON values of the provided data. The following conversions are
// made automatically:
//
// - Numeric types are converted if at least the integer part of the
//   value would be preserved correctly
// - Bools are converted to numeric types as 1 or 0
// - Numeric types are converted to bools as true if not 0 or false otherwise
// - Binary and string BSON data is converted to a string, array or byte slice
//
// If the value would not fit the type and cannot be converted, it's
// silently skipped.
//
// Pointer values are initialized when necessary.
func Unmarshal(in []byte, out interface{}) (err error) {
	if raw, ok := out.(*Raw); ok {
		// Fast path: a *Raw target just captures the document bytes
		// unprocessed (kind 3 is the BSON embedded-document kind).
		raw.Kind = 3
		raw.Data = in
		return nil
	}
	// The decoder panics on malformed input; handleErr converts that
	// panic into the returned err.
	defer handleErr(&err)
	v := reflect.ValueOf(out)
	switch v.Kind() {
	case reflect.Ptr:
		fallthrough
	case reflect.Map:
		d := newDecoder(in)
		d.readDocTo(v)
	case reflect.Struct:
		return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
	default:
		return errors.New("Unmarshal needs a map or a pointer to a struct.")
	}
	return nil
}
|
||||||
|
|
||||||
|
// Unmarshal deserializes raw into the out value. If the out value type
// is not compatible with raw, a *bson.TypeError is returned.
//
// See the Unmarshal function documentation for more details on the
// unmarshalling process.
func (raw Raw) Unmarshal(out interface{}) (err error) {
	// The decoder panics on malformed input; handleErr converts that
	// panic into the returned err.
	defer handleErr(&err)
	v := reflect.ValueOf(out)
	switch v.Kind() {
	case reflect.Ptr:
		// Dereference so the element is written through the pointer.
		v = v.Elem()
		fallthrough
	case reflect.Map:
		d := newDecoder(raw.Data)
		good := d.readElemTo(v, raw.Kind)
		if !good {
			return &TypeError{v.Type(), raw.Kind}
		}
	case reflect.Struct:
		return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
	default:
		return errors.New("Raw Unmarshal needs a map or a valid pointer.")
	}
	return nil
}
|
||||||
|
|
||||||
|
type TypeError struct {
|
||||||
|
Type reflect.Type
|
||||||
|
Kind byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *TypeError) Error() string {
|
||||||
|
return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
|
||||||
|
// Maintain a mapping of keys to structure field indexes
|
||||||
|
|
||||||
|
type structInfo struct {
|
||||||
|
FieldsMap map[string]fieldInfo
|
||||||
|
FieldsList []fieldInfo
|
||||||
|
InlineMap int
|
||||||
|
Zero reflect.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
type fieldInfo struct {
|
||||||
|
Key string
|
||||||
|
Num int
|
||||||
|
OmitEmpty bool
|
||||||
|
MinSize bool
|
||||||
|
Inline []int
|
||||||
|
}
|
||||||
|
|
||||||
|
var structMap = make(map[reflect.Type]*structInfo)
|
||||||
|
var structMapMutex sync.RWMutex
|
||||||
|
|
||||||
|
// externalPanic tags panic payloads that handleErr must re-raise
// instead of converting into an error.
type externalPanic string

// String returns the panic message.
func (e externalPanic) String() string {
	return string(e)
}
|
||||||
|
|
||||||
|
// getStructInfo returns the cached (or freshly computed) field layout of
// the struct type st, parsing "bson" tags and recursing into ",inline"
// struct fields. Invalid tag flags cause an externalPanic; conflicting
// keys and invalid ",inline" usage return errors.
func getStructInfo(st reflect.Type) (*structInfo, error) {
	// Fast path: return the cached layout if one exists.
	structMapMutex.RLock()
	sinfo, found := structMap[st]
	structMapMutex.RUnlock()
	if found {
		return sinfo, nil
	}
	n := st.NumField()
	fieldsMap := make(map[string]fieldInfo)
	fieldsList := make([]fieldInfo, 0, n)
	inlineMap := -1
	for i := 0; i != n; i++ {
		field := st.Field(i)
		if field.PkgPath != "" && !field.Anonymous {
			continue // Private field
		}

		info := fieldInfo{Num: i}

		// Accept a bare tag (no key:"value" syntax) as the bson tag for
		// backward compatibility with old gobson tags.
		tag := field.Tag.Get("bson")
		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
			tag = string(field.Tag)
		}
		if tag == "-" {
			continue
		}

		// Parse comma-separated tag flags after the element name.
		inline := false
		fields := strings.Split(tag, ",")
		if len(fields) > 1 {
			for _, flag := range fields[1:] {
				switch flag {
				case "omitempty":
					info.OmitEmpty = true
				case "minsize":
					info.MinSize = true
				case "inline":
					inline = true
				default:
					msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
					panic(externalPanic(msg))
				}
			}
			tag = fields[0]
		}

		if inline {
			switch field.Type.Kind() {
			case reflect.Map:
				// Only one ",inline" map is allowed, and it must have
				// string keys so it can absorb arbitrary element names.
				if inlineMap >= 0 {
					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
				}
				if field.Type.Key() != reflect.TypeOf("") {
					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
				}
				inlineMap = info.Num
			case reflect.Struct:
				// Promote the inner struct's fields into this struct,
				// extending each field's index path with i.
				sinfo, err := getStructInfo(field.Type)
				if err != nil {
					return nil, err
				}
				for _, finfo := range sinfo.FieldsList {
					if _, found := fieldsMap[finfo.Key]; found {
						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
						return nil, errors.New(msg)
					}
					if finfo.Inline == nil {
						finfo.Inline = []int{i, finfo.Num}
					} else {
						finfo.Inline = append([]int{i}, finfo.Inline...)
					}
					fieldsMap[finfo.Key] = finfo
					fieldsList = append(fieldsList, finfo)
				}
			default:
				panic("Option ,inline needs a struct value or map field")
			}
			continue
		}

		// Default element name is the lowercased Go field name.
		if tag != "" {
			info.Key = tag
		} else {
			info.Key = strings.ToLower(field.Name)
		}

		if _, found = fieldsMap[info.Key]; found {
			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
			return nil, errors.New(msg)
		}

		fieldsList = append(fieldsList, info)
		fieldsMap[info.Key] = info
	}
	sinfo = &structInfo{
		fieldsMap,
		fieldsList,
		inlineMap,
		reflect.New(st).Elem(),
	}
	// Publish the computed layout. A concurrent computation for the same
	// type simply overwrites with an equivalent value.
	structMapMutex.Lock()
	structMap[st] = sinfo
	structMapMutex.Unlock()
	return sinfo, nil
}
|
|
@ -0,0 +1,310 @@
|
||||||
|
// BSON library for Go
|
||||||
|
//
|
||||||
|
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||||
|
//
|
||||||
|
// All rights reserved.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are met:
|
||||||
|
//
|
||||||
|
// 1. Redistributions of source code must retain the above copyright notice, this
|
||||||
|
// list of conditions and the following disclaimer.
|
||||||
|
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer in the documentation
|
||||||
|
// and/or other materials provided with the distribution.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||||
|
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||||
|
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||||
|
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||||
|
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package bson
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Decimal128 holds decimal128 BSON values.
type Decimal128 struct {
	// h and l are the high and low 64 bits of the 128-bit value
	// (sign, combination/exponent, and significand bit fields).
	h, l uint64
}
|
||||||
|
|
||||||
|
// String formats the decimal128 value in the canonical string form
// mandated by the BSON specification ("NaN", "[-]Inf", plain decimal, or
// scientific "E" notation depending on the exponent).
func (d Decimal128) String() string {
	var pos int     // positive sign
	var e int       // exponent
	var h, l uint64 // significand high/low

	// pos is 1 when the sign bit is clear; it is used below to slice the
	// leading '-' off literal strings like "-Inf" and "-0".
	if d.h>>63&1 == 0 {
		pos = 1
	}

	// Top 5 combination bits distinguish the special values.
	switch d.h >> 58 & (1<<5 - 1) {
	case 0x1F:
		return "NaN"
	case 0x1E:
		return "-Inf"[pos:]
	}

	// Extract exponent and significand from one of the two encodings.
	l = d.l
	if d.h>>61&3 == 3 {
		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
		// Implicit 0b100 prefix in significand.
		e = int(d.h>>47&(1<<14-1)) - 6176
		//h = 4<<47 | d.h&(1<<47-1)
		// Spec says all of these values are out of range.
		h, l = 0, 0
	} else {
		// Bits: 1*sign 14*exponent 113*significand
		e = int(d.h>>49&(1<<14-1)) - 6176
		h = d.h & (1<<49 - 1)
	}

	// Would be handled by the logic below, but that's trivial and common.
	if h == 0 && l == 0 && e == 0 {
		return "-0"[pos:]
	}

	// Render digits right-to-left into repr, placing the decimal point
	// and tracking the leftmost significant position in last.
	var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
	var last = len(repr)
	var i = len(repr)
	var dot = len(repr) + e
	var rem uint32
Loop:
	for d9 := 0; d9 < 5; d9++ {
		// Peel off nine decimal digits of the 128-bit significand at a time.
		h, l, rem = divmod(h, l, 1e9)
		for d1 := 0; d1 < 9; d1++ {
			// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
			if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
				e += len(repr) - i
				i--
				repr[i] = '.'
				last = i - 1
				dot = len(repr) // Unmark.
			}
			c := '0' + byte(rem%10)
			rem /= 10
			i--
			repr[i] = c
			// Handle "0E+3", "1E+3", etc.
			if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
				last = i
				break Loop
			}
			if c != '0' {
				last = i
			}
			// Break early. Works without it, but why.
			if dot > i && l == 0 && h == 0 && rem == 0 {
				break Loop
			}
		}
	}
	// Reserve a slot for the sign; pos slices it away for positives.
	repr[last-1] = '-'
	last--

	if e > 0 {
		return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
	}
	if e < 0 {
		return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
	}
	return string(repr[last+pos:])
}
|
||||||
|
|
||||||
|
// divmod divides the 128-bit unsigned integer formed by (h, l) by div,
// returning the 128-bit quotient as (qh, ql) and the final remainder.
//
// It performs schoolbook long division over the four 32-bit limbs of the
// value, carrying each step's remainder into the next limb. Every
// intermediate fits in a uint64 because carry < div <= 2^32-1.
func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
	divisor := uint64(div)
	carry := uint64(0)
	// step divides one 32-bit limb (with the running remainder carried
	// in) and returns that limb of the quotient.
	step := func(limb uint64) uint64 {
		cur := carry<<32 + limb
		carry = cur % divisor
		return cur / divisor
	}
	q3 := step(h >> 32)
	q2 := step(h & (1<<32 - 1))
	q1 := step(l >> 32)
	q0 := step(l & (1<<32 - 1))
	return q3<<32 | q2, q1<<32 | q0, uint32(carry)
}
|
||||||
|
|
||||||
|
// Canonical bit patterns for the special decimal128 values.
var dNaN = Decimal128{0x1F << 58, 0}    // Not a Number.
var dPosInf = Decimal128{0x1E << 58, 0} // +Infinity.
var dNegInf = Decimal128{0x3E << 58, 0} // -Infinity (sign bit set).
|
||||||
|
|
||||||
|
// dErr returns NaN alongside a parse error naming the rejected input s.
func dErr(s string) (Decimal128, error) {
	return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
}
|
||||||
|
|
||||||
|
// ParseDecimal128 parses the textual representation in s (optionally
// signed; plain, scientific, "NaN", or "Inf"/"Infinity" forms) into a
// Decimal128 value. Inputs whose significand or exponent exceed the
// decimal128 range produce an error via dErr.
func ParseDecimal128(s string) (Decimal128, error) {
	orig := s
	if s == "" {
		return dErr(orig)
	}
	neg := s[0] == '-'
	if neg || s[0] == '+' {
		s = s[1:]
	}

	// Special values: "nan", "inf", "infinity" in any case.
	if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
		if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
			return dNaN, nil
		}
		if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
			if neg {
				return dNegInf, nil
			}
			return dPosInf, nil
		}
		return dErr(orig)
	}

	var h, l uint64 // 128-bit significand accumulator.
	var e int       // Decimal exponent.

	// Digits are accumulated nine at a time into (mul, add) and then
	// folded into (h, l) via muladd to limit expensive 128-bit ops.
	var add, ovr uint32
	var mul uint32 = 1
	var dot = -1
	var digits = 0
	var i = 0
	for i < len(s) {
		c := s[i]
		if mul == 1e9 {
			// Flush a full batch of nine digits into the significand.
			h, l, ovr = muladd(h, l, mul, add)
			mul, add = 1, 0
			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
				return dErr(orig)
			}
		}
		if c >= '0' && c <= '9' {
			i++
			// Leading zeros do not count toward the 34-digit limit.
			if c > '0' || digits > 0 {
				digits++
			}
			if digits > 34 {
				if c == '0' {
					// Exact rounding.
					e++
					continue
				}
				return dErr(orig)
			}
			mul *= 10
			add *= 10
			add += uint32(c - '0')
			continue
		}
		if c == '.' {
			i++
			// Reject a second dot and a bare ".".
			if dot >= 0 || i == 1 && len(s) == 1 {
				return dErr(orig)
			}
			if i == len(s) {
				break
			}
			if s[i] < '0' || s[i] > '9' || e > 0 {
				return dErr(orig)
			}
			dot = i
			continue
		}
		break
	}
	if i == 0 {
		return dErr(orig)
	}
	// Flush any digits still pending in (mul, add).
	if mul > 1 {
		h, l, ovr = muladd(h, l, mul, add)
		if ovr > 0 || h&((1<<15-1)<<49) > 0 {
			return dErr(orig)
		}
	}
	// Account for the fractional digits after the dot.
	if dot >= 0 {
		e += dot - i
	}
	// Optional exponent part: E or e followed by an optional sign.
	if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
		i++
		eneg := s[i] == '-'
		if eneg || s[i] == '+' {
			i++
			if i == len(s) {
				return dErr(orig)
			}
		}
		n := 0
		for i < len(s) && n < 1e4 {
			c := s[i]
			i++
			if c < '0' || c > '9' {
				return dErr(orig)
			}
			n *= 10
			n += int(c - '0')
		}
		if eneg {
			n = -n
		}
		e += n
		for e < -6176 {
			// Subnormal.
			var div uint32 = 1
			for div < 1e9 && e < -6176 {
				div *= 10
				e++
			}
			var rem uint32
			h, l, rem = divmod(h, l, div)
			if rem > 0 {
				return dErr(orig)
			}
		}
		for e > 6111 {
			// Clamped.
			var mul uint32 = 1
			for mul < 1e9 && e > 6111 {
				mul *= 10
				e--
			}
			h, l, ovr = muladd(h, l, mul, 0)
			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
				return dErr(orig)
			}
		}
		if e < -6176 || e > 6111 {
			return dErr(orig)
		}
	}

	// Trailing garbage after the number is an error.
	if i < len(s) {
		return dErr(orig)
	}

	// Pack the biased exponent and sign into the high word.
	h |= uint64(e+6176) & uint64(1<<14-1) << 49
	if neg {
		h |= 1 << 63
	}
	return Decimal128{h, l}, nil
}
|
||||||
|
|
||||||
|
// muladd computes (h, l)*mul + add over 128 bits, returning the result
// as (resh, resl) along with the carry out of the top limb in overflow.
//
// The multiply is done limb-wise over the four 32-bit limbs, each partial
// product keeping its own carry in the upper half of a uint64; the addend
// and inter-limb carries are then folded in.
func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
	m := uint64(mul)

	// Partial products, lowest limb first.
	p0 := m * (l & (1<<32 - 1))
	p1 := p0>>32 + m*(l>>32)
	p2 := p1>>32 + m*(h&(1<<32-1))
	p3 := p2>>32 + m*(h>>32)

	// Fold in the addend and propagate the carries upward.
	p0 = p0&(1<<32-1) + uint64(add)
	p1 = p1&(1<<32-1) + p0>>32
	p2 = p2&(1<<32-1) + p1>>32
	p3 = p3&(1<<32-1) + p2>>32

	resh = p3<<32 | p2&(1<<32-1)
	resl = p1<<32 | p0&(1<<32-1)
	return resh, resl, uint32(p3 >> 32)
}
|
|
@ -0,0 +1,849 @@
|
||||||
|
// BSON library for Go
|
||||||
|
//
|
||||||
|
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||||
|
//
|
||||||
|
// All rights reserved.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are met:
|
||||||
|
//
|
||||||
|
// 1. Redistributions of source code must retain the above copyright notice, this
|
||||||
|
// list of conditions and the following disclaimer.
|
||||||
|
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer in the documentation
|
||||||
|
// and/or other materials provided with the distribution.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||||
|
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||||
|
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||||
|
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||||
|
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
// gobson - BSON library for Go.
|
||||||
|
|
||||||
|
package bson
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"net/url"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// decoder walks a raw BSON byte slice, tracking the current read offset
// and the Go type used when decoding subdocuments into interface values.
type decoder struct {
	in      []byte       // Raw BSON input.
	i       int          // Current read offset into in.
	docType reflect.Type // Target type for untyped subdocuments.
}

// typeM is the reflect.Type of bson.M, the default subdocument type.
var typeM = reflect.TypeOf(M{})

// newDecoder returns a decoder positioned at the start of in that
// decodes untyped subdocuments into bson.M.
func newDecoder(in []byte) *decoder {
	return &decoder{in, 0, typeM}
}
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
|
||||||
|
// Some helper functions.
|
||||||
|
|
||||||
|
// corrupted aborts decoding via panic when the input violates BSON
// framing rules; handleErr converts the panic into an error upstream.
func corrupted() {
	panic("Document is corrupted")
}
|
||||||
|
|
||||||
|
// settableValueOf returns an addressable, settable reflect.Value holding
// a copy of i. reflect.ValueOf alone yields a non-settable value, so the
// copy is placed inside a freshly allocated variable of the same type.
func settableValueOf(i interface{}) reflect.Value {
	dst := reflect.New(reflect.TypeOf(i)).Elem()
	dst.Set(reflect.ValueOf(i))
	return dst
}
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
|
||||||
|
// Unmarshaling of documents.
|
||||||
|
|
||||||
|
// Setter lookup style per destination type, cached in setterStyles.
const (
	setterUnknown = iota // Not yet determined (map zero value).
	setterNone           // Type does not implement bson.Setter.
	setterType           // The type itself implements bson.Setter.
	setterAddr           // A pointer to the type implements bson.Setter.
)

// setterStyles caches the setter style per type; setterIface holds the
// reflect.Type of the Setter interface; setterMutex guards the cache.
var setterStyles map[reflect.Type]int
var setterIface reflect.Type
var setterMutex sync.RWMutex

func init() {
	// Capture the Setter interface type via a pointer so the interface
	// type itself (not a concrete implementation) is obtained.
	var iface Setter
	setterIface = reflect.TypeOf(&iface).Elem()
	setterStyles = make(map[reflect.Type]int)
}
|
||||||
|
|
||||||
|
// setterStyle reports how outt implements the bson.Setter interface
// (directly, via its pointer, or not at all), caching the answer per type.
func setterStyle(outt reflect.Type) int {
	setterMutex.RLock()
	style := setterStyles[outt]
	setterMutex.RUnlock()
	if style == setterUnknown {
		// First sighting of this type: compute and cache under the
		// write lock. A concurrent recomputation is harmless.
		setterMutex.Lock()
		defer setterMutex.Unlock()
		if outt.Implements(setterIface) {
			setterStyles[outt] = setterType
		} else if reflect.PtrTo(outt).Implements(setterIface) {
			setterStyles[outt] = setterAddr
		} else {
			setterStyles[outt] = setterNone
		}
		style = setterStyles[outt]
	}
	return style
}
|
||||||
|
|
||||||
|
// getSetter returns out as a bson.Setter when its type supports it,
// taking the address or allocating through nil pointers as required.
// It returns nil when the type does not implement Setter or when out
// cannot be addressed.
func getSetter(outt reflect.Type, out reflect.Value) Setter {
	style := setterStyle(outt)
	if style == setterNone {
		return nil
	}
	if style == setterAddr {
		if !out.CanAddr() {
			return nil
		}
		out = out.Addr()
	} else if outt.Kind() == reflect.Ptr && out.IsNil() {
		// Allocate so SetBSON has a valid receiver to write through.
		out.Set(reflect.New(outt.Elem()))
	}
	return out.Interface().(Setter)
}
|
||||||
|
|
||||||
|
func clearMap(m reflect.Value) {
|
||||||
|
var none reflect.Value
|
||||||
|
for _, k := range m.MapKeys() {
|
||||||
|
m.SetMapIndex(k, none)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// readDocTo decodes one embedded BSON document from the input into out,
// which may be a map with string keys, a struct, a Raw, a []DocElem /
// []RawDocElem slice, an interface, or a (possibly nil) pointer to any
// of those. Corrupted framing panics via corrupted().
func (d *decoder) readDocTo(out reflect.Value) {
	var elemType reflect.Type
	outt := out.Type()
	outk := outt.Kind()

	// Unwrap pointers, allocating as needed, and divert to a custom
	// SetBSON implementation if the destination provides one.
	for {
		if outk == reflect.Ptr && out.IsNil() {
			out.Set(reflect.New(outt.Elem()))
		}
		if setter := getSetter(outt, out); setter != nil {
			// Hand the raw document bytes to the custom setter.
			var raw Raw
			d.readDocTo(reflect.ValueOf(&raw))
			err := setter.SetBSON(raw)
			if _, ok := err.(*TypeError); err != nil && !ok {
				panic(err)
			}
			return
		}
		if outk == reflect.Ptr {
			out = out.Elem()
			outt = out.Type()
			outk = out.Kind()
			continue
		}
		break
	}

	var fieldsMap map[string]fieldInfo
	var inlineMap reflect.Value
	start := d.i

	origout := out
	if outk == reflect.Interface {
		// Materialize a concrete document value (d.docType) behind the
		// interface and decode into it.
		if d.docType.Kind() == reflect.Map {
			mv := reflect.MakeMap(d.docType)
			out.Set(mv)
			out = mv
		} else {
			dv := reflect.New(d.docType).Elem()
			out.Set(dv)
			out = dv
		}
		outt = out.Type()
		outk = outt.Kind()
	}

	// Save d.docType so nested decodes can change it; restored below.
	docType := d.docType
	keyType := typeString
	convertKey := false
	switch outk {
	case reflect.Map:
		keyType = outt.Key()
		if keyType.Kind() != reflect.String {
			panic("BSON map must have string keys. Got: " + outt.String())
		}
		if keyType != typeString {
			convertKey = true
		}
		elemType = outt.Elem()
		if elemType == typeIface {
			d.docType = outt
		}
		if out.IsNil() {
			out.Set(reflect.MakeMap(out.Type()))
		} else if out.Len() > 0 {
			clearMap(out)
		}
	case reflect.Struct:
		if outt != typeRaw {
			sinfo, err := getStructInfo(out.Type())
			if err != nil {
				panic(err)
			}
			fieldsMap = sinfo.FieldsMap
			// Reset the destination before filling in decoded fields.
			out.Set(sinfo.Zero)
			if sinfo.InlineMap != -1 {
				inlineMap = out.Field(sinfo.InlineMap)
				if !inlineMap.IsNil() && inlineMap.Len() > 0 {
					clearMap(inlineMap)
				}
				elemType = inlineMap.Type().Elem()
				if elemType == typeIface {
					d.docType = inlineMap.Type()
				}
			}
		}
	case reflect.Slice:
		switch outt.Elem() {
		case typeDocElem:
			origout.Set(d.readDocElems(outt))
			return
		case typeRawDocElem:
			origout.Set(d.readRawDocElems(outt))
			return
		}
		fallthrough
	default:
		panic("Unsupported document type for unmarshalling: " + out.Type().String())
	}

	// Validate the document's length prefix and trailing NUL.
	end := int(d.readInt32())
	end += d.i - 4
	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
		corrupted()
	}
	for d.in[d.i] != '\x00' {
		kind := d.readByte()
		name := d.readCStr()
		if d.i >= end {
			corrupted()
		}

		switch outk {
		case reflect.Map:
			e := reflect.New(elemType).Elem()
			if d.readElemTo(e, kind) {
				k := reflect.ValueOf(name)
				if convertKey {
					k = k.Convert(keyType)
				}
				out.SetMapIndex(k, e)
			}
		case reflect.Struct:
			if outt == typeRaw {
				// Raw captures the bytes below; skip element contents.
				d.dropElem(kind)
			} else {
				if info, ok := fieldsMap[name]; ok {
					if info.Inline == nil {
						d.readElemTo(out.Field(info.Num), kind)
					} else {
						d.readElemTo(out.FieldByIndex(info.Inline), kind)
					}
				} else if inlineMap.IsValid() {
					// Unknown keys fall into the ",inline" map.
					if inlineMap.IsNil() {
						inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
					}
					e := reflect.New(elemType).Elem()
					if d.readElemTo(e, kind) {
						inlineMap.SetMapIndex(reflect.ValueOf(name), e)
					}
				} else {
					d.dropElem(kind)
				}
			}
		case reflect.Slice:
		}

		if d.i >= end {
			corrupted()
		}
	}
	d.i++ // '\x00'
	if d.i != end {
		corrupted()
	}
	d.docType = docType

	// For Raw destinations, hand back the raw document bytes.
	if outt == typeRaw {
		out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
	}
}
|
||||||
|
|
||||||
|
// readArrayDocTo decodes a BSON array document into the fixed-length Go
// array held in out, panicking if the element counts do not match.
func (d *decoder) readArrayDocTo(out reflect.Value) {
	// Validate the array document's length prefix and trailing NUL.
	end := int(d.readInt32())
	end += d.i - 4
	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
		corrupted()
	}
	i := 0
	l := out.Len()
	for d.in[d.i] != '\x00' {
		if i >= l {
			panic("Length mismatch on array field")
		}
		kind := d.readByte()
		// Skip the element name: array keys are just stringified indices.
		for d.i < end && d.in[d.i] != '\x00' {
			d.i++
		}
		if d.i >= end {
			corrupted()
		}
		d.i++
		d.readElemTo(out.Index(i), kind)
		if d.i >= end {
			corrupted()
		}
		i++
	}
	if i != l {
		panic("Length mismatch on array field")
	}
	d.i++ // '\x00'
	if d.i != end {
		corrupted()
	}
}
|
||||||
|
|
||||||
|
// readSliceDoc decodes a BSON array document into a new slice of type t,
// returning it as an interface{}. []RawDocElem destinations are skipped
// and get a zero slice (they are handled through readRawDocElems).
func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
	tmp := make([]reflect.Value, 0, 8)
	elemType := t.Elem()
	if elemType == typeRawDocElem {
		d.dropElem(0x04)
		return reflect.Zero(t).Interface()
	}

	// Validate the array document's length prefix and trailing NUL.
	end := int(d.readInt32())
	end += d.i - 4
	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
		corrupted()
	}
	for d.in[d.i] != '\x00' {
		kind := d.readByte()
		// Skip the element name: array keys are just stringified indices.
		for d.i < end && d.in[d.i] != '\x00' {
			d.i++
		}
		if d.i >= end {
			corrupted()
		}
		d.i++
		e := reflect.New(elemType).Elem()
		if d.readElemTo(e, kind) {
			tmp = append(tmp, e)
		}
		if d.i >= end {
			corrupted()
		}
	}
	d.i++ // '\x00'
	if d.i != end {
		corrupted()
	}

	// Copy the collected elements into a correctly-typed slice.
	n := len(tmp)
	slice := reflect.MakeSlice(t, n, n)
	for i := 0; i != n; i++ {
		slice.Index(i).Set(tmp[i])
	}
	return slice.Interface()
}
|
||||||
|
|
||||||
|
// typeSlice and typeIface identify []interface{} and interface{},
// the default targets for untyped BSON arrays and values.
var typeSlice = reflect.TypeOf([]interface{}{})
var typeIface = typeSlice.Elem()
|
||||||
|
|
||||||
|
// readDocElems decodes a document into an order-preserving []DocElem
// slice of type typ and returns it as a reflect.Value.
func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
	// Temporarily switch the subdocument type; restored before return.
	docType := d.docType
	d.docType = typ
	slice := make([]DocElem, 0, 8)
	d.readDocWith(func(kind byte, name string) {
		e := DocElem{Name: name}
		v := reflect.ValueOf(&e.Value)
		if d.readElemTo(v.Elem(), kind) {
			slice = append(slice, e)
		}
	})
	slicev := reflect.New(typ).Elem()
	slicev.Set(reflect.ValueOf(slice))
	d.docType = docType
	return slicev
}
|
||||||
|
|
||||||
|
// readRawDocElems decodes a document into an order-preserving
// []RawDocElem slice of type typ, keeping each value as raw bytes.
func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
	// Temporarily switch the subdocument type; restored before return.
	docType := d.docType
	d.docType = typ
	slice := make([]RawDocElem, 0, 8)
	d.readDocWith(func(kind byte, name string) {
		e := RawDocElem{Name: name}
		v := reflect.ValueOf(&e.Value)
		if d.readElemTo(v.Elem(), kind) {
			slice = append(slice, e)
		}
	})
	slicev := reflect.New(typ).Elem()
	slicev.Set(reflect.ValueOf(slice))
	d.docType = docType
	return slicev
}
|
||||||
|
|
||||||
|
// readDocWith walks the elements of one document, invoking f with each
// element's kind and name; f is responsible for consuming the value.
// Framing violations panic via corrupted().
func (d *decoder) readDocWith(f func(kind byte, name string)) {
	// Validate the document's length prefix and trailing NUL.
	end := int(d.readInt32())
	end += d.i - 4
	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
		corrupted()
	}
	for d.in[d.i] != '\x00' {
		kind := d.readByte()
		name := d.readCStr()
		if d.i >= end {
			corrupted()
		}
		f(kind, name)
		if d.i >= end {
			corrupted()
		}
	}
	d.i++ // '\x00'
	if d.i != end {
		corrupted()
	}
}
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
|
||||||
|
// Unmarshaling of individual elements within a document.
|
||||||
|
|
||||||
|
// blackHole is a reusable settable value used to decode and discard
// elements whose contents are not needed.
var blackHole = settableValueOf(struct{}{})

// dropElem consumes and discards the next element of the given kind.
func (d *decoder) dropElem(kind byte) {
	d.readElemTo(blackHole, kind)
}
|
||||||
|
|
||||||
|
// Attempt to decode an element from the document and put it into out.
|
||||||
|
// If the types are not compatible, the returned ok value will be
|
||||||
|
// false and out will be unchanged.
|
||||||
|
func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
|
||||||
|
|
||||||
|
start := d.i
|
||||||
|
|
||||||
|
if kind == 0x03 {
|
||||||
|
// Delegate unmarshaling of documents.
|
||||||
|
outt := out.Type()
|
||||||
|
outk := out.Kind()
|
||||||
|
switch outk {
|
||||||
|
case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
|
||||||
|
d.readDocTo(out)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if setterStyle(outt) != setterNone {
|
||||||
|
d.readDocTo(out)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if outk == reflect.Slice {
|
||||||
|
switch outt.Elem() {
|
||||||
|
case typeDocElem:
|
||||||
|
out.Set(d.readDocElems(outt))
|
||||||
|
case typeRawDocElem:
|
||||||
|
out.Set(d.readRawDocElems(outt))
|
||||||
|
default:
|
||||||
|
d.readDocTo(blackHole)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
d.readDocTo(blackHole)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
var in interface{}
|
||||||
|
|
||||||
|
switch kind {
|
||||||
|
case 0x01: // Float64
|
||||||
|
in = d.readFloat64()
|
||||||
|
case 0x02: // UTF-8 string
|
||||||
|
in = d.readStr()
|
||||||
|
case 0x03: // Document
|
||||||
|
panic("Can't happen. Handled above.")
|
||||||
|
case 0x04: // Array
|
||||||
|
outt := out.Type()
|
||||||
|
if setterStyle(outt) != setterNone {
|
||||||
|
// Skip the value so its data is handed to the setter below.
|
||||||
|
d.dropElem(kind)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
for outt.Kind() == reflect.Ptr {
|
||||||
|
outt = outt.Elem()
|
||||||
|
}
|
||||||
|
switch outt.Kind() {
|
||||||
|
case reflect.Array:
|
||||||
|
d.readArrayDocTo(out)
|
||||||
|
return true
|
||||||
|
case reflect.Slice:
|
||||||
|
in = d.readSliceDoc(outt)
|
||||||
|
default:
|
||||||
|
in = d.readSliceDoc(typeSlice)
|
||||||
|
}
|
||||||
|
case 0x05: // Binary
|
||||||
|
b := d.readBinary()
|
||||||
|
if b.Kind == 0x00 || b.Kind == 0x02 {
|
||||||
|
in = b.Data
|
||||||
|
} else {
|
||||||
|
in = b
|
||||||
|
}
|
||||||
|
case 0x06: // Undefined (obsolete, but still seen in the wild)
|
||||||
|
in = Undefined
|
||||||
|
case 0x07: // ObjectId
|
||||||
|
in = ObjectId(d.readBytes(12))
|
||||||
|
case 0x08: // Bool
|
||||||
|
in = d.readBool()
|
||||||
|
case 0x09: // Timestamp
|
||||||
|
// MongoDB handles timestamps as milliseconds.
|
||||||
|
i := d.readInt64()
|
||||||
|
if i == -62135596800000 {
|
||||||
|
in = time.Time{} // In UTC for convenience.
|
||||||
|
} else {
|
||||||
|
in = time.Unix(i/1e3, i%1e3*1e6)
|
||||||
|
}
|
||||||
|
case 0x0A: // Nil
|
||||||
|
in = nil
|
||||||
|
case 0x0B: // RegEx
|
||||||
|
in = d.readRegEx()
|
||||||
|
case 0x0C:
|
||||||
|
in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
|
||||||
|
case 0x0D: // JavaScript without scope
|
||||||
|
in = JavaScript{Code: d.readStr()}
|
||||||
|
case 0x0E: // Symbol
|
||||||
|
in = Symbol(d.readStr())
|
||||||
|
case 0x0F: // JavaScript with scope
|
||||||
|
d.i += 4 // Skip length
|
||||||
|
js := JavaScript{d.readStr(), make(M)}
|
||||||
|
d.readDocTo(reflect.ValueOf(js.Scope))
|
||||||
|
in = js
|
||||||
|
case 0x10: // Int32
|
||||||
|
in = int(d.readInt32())
|
||||||
|
case 0x11: // Mongo-specific timestamp
|
||||||
|
in = MongoTimestamp(d.readInt64())
|
||||||
|
case 0x12: // Int64
|
||||||
|
in = d.readInt64()
|
||||||
|
case 0x13: // Decimal128
|
||||||
|
in = Decimal128{
|
||||||
|
l: uint64(d.readInt64()),
|
||||||
|
h: uint64(d.readInt64()),
|
||||||
|
}
|
||||||
|
case 0x7F: // Max key
|
||||||
|
in = MaxKey
|
||||||
|
case 0xFF: // Min key
|
||||||
|
in = MinKey
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
|
||||||
|
}
|
||||||
|
|
||||||
|
outt := out.Type()
|
||||||
|
|
||||||
|
if outt == typeRaw {
|
||||||
|
out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if setter := getSetter(outt, out); setter != nil {
|
||||||
|
err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
|
||||||
|
if err == SetZero {
|
||||||
|
out.Set(reflect.Zero(outt))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if _, ok := err.(*TypeError); !ok {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if in == nil {
|
||||||
|
out.Set(reflect.Zero(outt))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
outk := outt.Kind()
|
||||||
|
|
||||||
|
// Dereference and initialize pointer if necessary.
|
||||||
|
first := true
|
||||||
|
for outk == reflect.Ptr {
|
||||||
|
if !out.IsNil() {
|
||||||
|
out = out.Elem()
|
||||||
|
} else {
|
||||||
|
elem := reflect.New(outt.Elem())
|
||||||
|
if first {
|
||||||
|
// Only set if value is compatible.
|
||||||
|
first = false
|
||||||
|
defer func(out, elem reflect.Value) {
|
||||||
|
if good {
|
||||||
|
out.Set(elem)
|
||||||
|
}
|
||||||
|
}(out, elem)
|
||||||
|
} else {
|
||||||
|
out.Set(elem)
|
||||||
|
}
|
||||||
|
out = elem
|
||||||
|
}
|
||||||
|
outt = out.Type()
|
||||||
|
outk = outt.Kind()
|
||||||
|
}
|
||||||
|
|
||||||
|
inv := reflect.ValueOf(in)
|
||||||
|
if outt == inv.Type() {
|
||||||
|
out.Set(inv)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
switch outk {
|
||||||
|
case reflect.Interface:
|
||||||
|
out.Set(inv)
|
||||||
|
return true
|
||||||
|
case reflect.String:
|
||||||
|
switch inv.Kind() {
|
||||||
|
case reflect.String:
|
||||||
|
out.SetString(inv.String())
|
||||||
|
return true
|
||||||
|
case reflect.Slice:
|
||||||
|
if b, ok := in.([]byte); ok {
|
||||||
|
out.SetString(string(b))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
case reflect.Int, reflect.Int64:
|
||||||
|
if outt == typeJSONNumber {
|
||||||
|
out.SetString(strconv.FormatInt(inv.Int(), 10))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
case reflect.Float64:
|
||||||
|
if outt == typeJSONNumber {
|
||||||
|
out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Slice, reflect.Array:
|
||||||
|
// Remember, array (0x04) slices are built with the correct
|
||||||
|
// element type. If we are here, must be a cross BSON kind
|
||||||
|
// conversion (e.g. 0x05 unmarshalling on string).
|
||||||
|
if outt.Elem().Kind() != reflect.Uint8 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
switch inv.Kind() {
|
||||||
|
case reflect.String:
|
||||||
|
slice := []byte(inv.String())
|
||||||
|
out.Set(reflect.ValueOf(slice))
|
||||||
|
return true
|
||||||
|
case reflect.Slice:
|
||||||
|
switch outt.Kind() {
|
||||||
|
case reflect.Array:
|
||||||
|
reflect.Copy(out, inv)
|
||||||
|
case reflect.Slice:
|
||||||
|
out.SetBytes(inv.Bytes())
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
switch inv.Kind() {
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
out.SetInt(inv.Int())
|
||||||
|
return true
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
out.SetInt(int64(inv.Float()))
|
||||||
|
return true
|
||||||
|
case reflect.Bool:
|
||||||
|
if inv.Bool() {
|
||||||
|
out.SetInt(1)
|
||||||
|
} else {
|
||||||
|
out.SetInt(0)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
panic("can't happen: no uint types in BSON (!?)")
|
||||||
|
}
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
switch inv.Kind() {
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
out.SetUint(uint64(inv.Int()))
|
||||||
|
return true
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
out.SetUint(uint64(inv.Float()))
|
||||||
|
return true
|
||||||
|
case reflect.Bool:
|
||||||
|
if inv.Bool() {
|
||||||
|
out.SetUint(1)
|
||||||
|
} else {
|
||||||
|
out.SetUint(0)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
panic("Can't happen. No uint types in BSON.")
|
||||||
|
}
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
switch inv.Kind() {
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
out.SetFloat(inv.Float())
|
||||||
|
return true
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
out.SetFloat(float64(inv.Int()))
|
||||||
|
return true
|
||||||
|
case reflect.Bool:
|
||||||
|
if inv.Bool() {
|
||||||
|
out.SetFloat(1)
|
||||||
|
} else {
|
||||||
|
out.SetFloat(0)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
panic("Can't happen. No uint types in BSON?")
|
||||||
|
}
|
||||||
|
case reflect.Bool:
|
||||||
|
switch inv.Kind() {
|
||||||
|
case reflect.Bool:
|
||||||
|
out.SetBool(inv.Bool())
|
||||||
|
return true
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
out.SetBool(inv.Int() != 0)
|
||||||
|
return true
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
out.SetBool(inv.Float() != 0)
|
||||||
|
return true
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
panic("Can't happen. No uint types in BSON?")
|
||||||
|
}
|
||||||
|
case reflect.Struct:
|
||||||
|
if outt == typeURL && inv.Kind() == reflect.String {
|
||||||
|
u, err := url.Parse(inv.String())
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
out.Set(reflect.ValueOf(u).Elem())
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if outt == typeBinary {
|
||||||
|
if b, ok := in.([]byte); ok {
|
||||||
|
out.Set(reflect.ValueOf(Binary{Data: b}))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
|
||||||
|
// Parsers of basic types.
|
||||||
|
|
||||||
|
func (d *decoder) readRegEx() RegEx {
|
||||||
|
re := RegEx{}
|
||||||
|
re.Pattern = d.readCStr()
|
||||||
|
re.Options = d.readCStr()
|
||||||
|
return re
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) readBinary() Binary {
|
||||||
|
l := d.readInt32()
|
||||||
|
b := Binary{}
|
||||||
|
b.Kind = d.readByte()
|
||||||
|
b.Data = d.readBytes(l)
|
||||||
|
if b.Kind == 0x02 && len(b.Data) >= 4 {
|
||||||
|
// Weird obsolete format with redundant length.
|
||||||
|
b.Data = b.Data[4:]
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) readStr() string {
|
||||||
|
l := d.readInt32()
|
||||||
|
b := d.readBytes(l - 1)
|
||||||
|
if d.readByte() != '\x00' {
|
||||||
|
corrupted()
|
||||||
|
}
|
||||||
|
return string(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) readCStr() string {
|
||||||
|
start := d.i
|
||||||
|
end := start
|
||||||
|
l := len(d.in)
|
||||||
|
for ; end != l; end++ {
|
||||||
|
if d.in[end] == '\x00' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.i = end + 1
|
||||||
|
if d.i > l {
|
||||||
|
corrupted()
|
||||||
|
}
|
||||||
|
return string(d.in[start:end])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) readBool() bool {
|
||||||
|
b := d.readByte()
|
||||||
|
if b == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if b == 1 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) readFloat64() float64 {
|
||||||
|
return math.Float64frombits(uint64(d.readInt64()))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) readInt32() int32 {
|
||||||
|
b := d.readBytes(4)
|
||||||
|
return int32((uint32(b[0]) << 0) |
|
||||||
|
(uint32(b[1]) << 8) |
|
||||||
|
(uint32(b[2]) << 16) |
|
||||||
|
(uint32(b[3]) << 24))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) readInt64() int64 {
|
||||||
|
b := d.readBytes(8)
|
||||||
|
return int64((uint64(b[0]) << 0) |
|
||||||
|
(uint64(b[1]) << 8) |
|
||||||
|
(uint64(b[2]) << 16) |
|
||||||
|
(uint64(b[3]) << 24) |
|
||||||
|
(uint64(b[4]) << 32) |
|
||||||
|
(uint64(b[5]) << 40) |
|
||||||
|
(uint64(b[6]) << 48) |
|
||||||
|
(uint64(b[7]) << 56))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) readByte() byte {
|
||||||
|
i := d.i
|
||||||
|
d.i++
|
||||||
|
if d.i > len(d.in) {
|
||||||
|
corrupted()
|
||||||
|
}
|
||||||
|
return d.in[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) readBytes(length int32) []byte {
|
||||||
|
if length < 0 {
|
||||||
|
corrupted()
|
||||||
|
}
|
||||||
|
start := d.i
|
||||||
|
d.i += int(length)
|
||||||
|
if d.i < start || d.i > len(d.in) {
|
||||||
|
corrupted()
|
||||||
|
}
|
||||||
|
return d.in[start : start+int(length)]
|
||||||
|
}
|
|
@ -0,0 +1,514 @@
|
||||||
|
// BSON library for Go
|
||||||
|
//
|
||||||
|
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||||
|
//
|
||||||
|
// All rights reserved.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are met:
|
||||||
|
//
|
||||||
|
// 1. Redistributions of source code must retain the above copyright notice, this
|
||||||
|
// list of conditions and the following disclaimer.
|
||||||
|
// 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
// this list of conditions and the following disclaimer in the documentation
|
||||||
|
// and/or other materials provided with the distribution.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||||
|
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||||
|
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||||
|
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||||
|
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
// gobson - BSON library for Go.
|
||||||
|
|
||||||
|
package bson
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"net/url"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
|
||||||
|
// Some internal infrastructure.
|
||||||
|
|
||||||
|
var (
|
||||||
|
typeBinary = reflect.TypeOf(Binary{})
|
||||||
|
typeObjectId = reflect.TypeOf(ObjectId(""))
|
||||||
|
typeDBPointer = reflect.TypeOf(DBPointer{"", ObjectId("")})
|
||||||
|
typeSymbol = reflect.TypeOf(Symbol(""))
|
||||||
|
typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
|
||||||
|
typeOrderKey = reflect.TypeOf(MinKey)
|
||||||
|
typeDocElem = reflect.TypeOf(DocElem{})
|
||||||
|
typeRawDocElem = reflect.TypeOf(RawDocElem{})
|
||||||
|
typeRaw = reflect.TypeOf(Raw{})
|
||||||
|
typeURL = reflect.TypeOf(url.URL{})
|
||||||
|
typeTime = reflect.TypeOf(time.Time{})
|
||||||
|
typeString = reflect.TypeOf("")
|
||||||
|
typeJSONNumber = reflect.TypeOf(json.Number(""))
|
||||||
|
)
|
||||||
|
|
||||||
|
const itoaCacheSize = 32
|
||||||
|
|
||||||
|
var itoaCache []string
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
itoaCache = make([]string, itoaCacheSize)
|
||||||
|
for i := 0; i != itoaCacheSize; i++ {
|
||||||
|
itoaCache[i] = strconv.Itoa(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func itoa(i int) string {
|
||||||
|
if i < itoaCacheSize {
|
||||||
|
return itoaCache[i]
|
||||||
|
}
|
||||||
|
return strconv.Itoa(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
|
||||||
|
// Marshaling of the document value itself.
|
||||||
|
|
||||||
|
type encoder struct {
|
||||||
|
out []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) addDoc(v reflect.Value) {
|
||||||
|
for {
|
||||||
|
if vi, ok := v.Interface().(Getter); ok {
|
||||||
|
getv, err := vi.GetBSON()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
v = reflect.ValueOf(getv)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if v.Kind() == reflect.Ptr {
|
||||||
|
v = v.Elem()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.Type() == typeRaw {
|
||||||
|
raw := v.Interface().(Raw)
|
||||||
|
if raw.Kind != 0x03 && raw.Kind != 0x00 {
|
||||||
|
panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
|
||||||
|
}
|
||||||
|
if len(raw.Data) == 0 {
|
||||||
|
panic("Attempted to marshal empty Raw document")
|
||||||
|
}
|
||||||
|
e.addBytes(raw.Data...)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
start := e.reserveInt32()
|
||||||
|
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.Map:
|
||||||
|
e.addMap(v)
|
||||||
|
case reflect.Struct:
|
||||||
|
e.addStruct(v)
|
||||||
|
case reflect.Array, reflect.Slice:
|
||||||
|
e.addSlice(v)
|
||||||
|
default:
|
||||||
|
panic("Can't marshal " + v.Type().String() + " as a BSON document")
|
||||||
|
}
|
||||||
|
|
||||||
|
e.addBytes(0)
|
||||||
|
e.setInt32(start, int32(len(e.out)-start))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) addMap(v reflect.Value) {
|
||||||
|
for _, k := range v.MapKeys() {
|
||||||
|
e.addElem(k.String(), v.MapIndex(k), false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) addStruct(v reflect.Value) {
|
||||||
|
sinfo, err := getStructInfo(v.Type())
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
var value reflect.Value
|
||||||
|
if sinfo.InlineMap >= 0 {
|
||||||
|
m := v.Field(sinfo.InlineMap)
|
||||||
|
if m.Len() > 0 {
|
||||||
|
for _, k := range m.MapKeys() {
|
||||||
|
ks := k.String()
|
||||||
|
if _, found := sinfo.FieldsMap[ks]; found {
|
||||||
|
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
|
||||||
|
}
|
||||||
|
e.addElem(ks, m.MapIndex(k), false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, info := range sinfo.FieldsList {
|
||||||
|
if info.Inline == nil {
|
||||||
|
value = v.Field(info.Num)
|
||||||
|
} else {
|
||||||
|
value = v.FieldByIndex(info.Inline)
|
||||||
|
}
|
||||||
|
if info.OmitEmpty && isZero(value) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
e.addElem(info.Key, value, info.MinSize)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isZero(v reflect.Value) bool {
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.String:
|
||||||
|
return len(v.String()) == 0
|
||||||
|
case reflect.Ptr, reflect.Interface:
|
||||||
|
return v.IsNil()
|
||||||
|
case reflect.Slice:
|
||||||
|
return v.Len() == 0
|
||||||
|
case reflect.Map:
|
||||||
|
return v.Len() == 0
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
return v.Int() == 0
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
return v.Uint() == 0
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
return v.Float() == 0
|
||||||
|
case reflect.Bool:
|
||||||
|
return !v.Bool()
|
||||||
|
case reflect.Struct:
|
||||||
|
vt := v.Type()
|
||||||
|
if vt == typeTime {
|
||||||
|
return v.Interface().(time.Time).IsZero()
|
||||||
|
}
|
||||||
|
for i := 0; i < v.NumField(); i++ {
|
||||||
|
if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
|
||||||
|
continue // Private field
|
||||||
|
}
|
||||||
|
if !isZero(v.Field(i)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) addSlice(v reflect.Value) {
|
||||||
|
vi := v.Interface()
|
||||||
|
if d, ok := vi.(D); ok {
|
||||||
|
for _, elem := range d {
|
||||||
|
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if d, ok := vi.(RawD); ok {
|
||||||
|
for _, elem := range d {
|
||||||
|
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l := v.Len()
|
||||||
|
et := v.Type().Elem()
|
||||||
|
if et == typeDocElem {
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
elem := v.Index(i).Interface().(DocElem)
|
||||||
|
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if et == typeRawDocElem {
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
elem := v.Index(i).Interface().(RawDocElem)
|
||||||
|
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
e.addElem(itoa(i), v.Index(i), false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
|
||||||
|
// Marshaling of elements in a document.
|
||||||
|
|
||||||
|
func (e *encoder) addElemName(kind byte, name string) {
|
||||||
|
e.addBytes(kind)
|
||||||
|
e.addBytes([]byte(name)...)
|
||||||
|
e.addBytes(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
|
||||||
|
|
||||||
|
if !v.IsValid() {
|
||||||
|
e.addElemName(0x0A, name)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if getter, ok := v.Interface().(Getter); ok {
|
||||||
|
getv, err := getter.GetBSON()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
e.addElem(name, reflect.ValueOf(getv), minSize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v.Kind() {
|
||||||
|
|
||||||
|
case reflect.Interface:
|
||||||
|
e.addElem(name, v.Elem(), minSize)
|
||||||
|
|
||||||
|
case reflect.Ptr:
|
||||||
|
e.addElem(name, v.Elem(), minSize)
|
||||||
|
|
||||||
|
case reflect.String:
|
||||||
|
s := v.String()
|
||||||
|
switch v.Type() {
|
||||||
|
case typeObjectId:
|
||||||
|
if len(s) != 12 {
|
||||||
|
panic("ObjectIDs must be exactly 12 bytes long (got " +
|
||||||
|
strconv.Itoa(len(s)) + ")")
|
||||||
|
}
|
||||||
|
e.addElemName(0x07, name)
|
||||||
|
e.addBytes([]byte(s)...)
|
||||||
|
case typeSymbol:
|
||||||
|
e.addElemName(0x0E, name)
|
||||||
|
e.addStr(s)
|
||||||
|
case typeJSONNumber:
|
||||||
|
n := v.Interface().(json.Number)
|
||||||
|
if i, err := n.Int64(); err == nil {
|
||||||
|
e.addElemName(0x12, name)
|
||||||
|
e.addInt64(i)
|
||||||
|
} else if f, err := n.Float64(); err == nil {
|
||||||
|
e.addElemName(0x01, name)
|
||||||
|
e.addFloat64(f)
|
||||||
|
} else {
|
||||||
|
panic("failed to convert json.Number to a number: " + s)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
e.addElemName(0x02, name)
|
||||||
|
e.addStr(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
e.addElemName(0x01, name)
|
||||||
|
e.addFloat64(v.Float())
|
||||||
|
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
u := v.Uint()
|
||||||
|
if int64(u) < 0 {
|
||||||
|
panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
|
||||||
|
} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
|
||||||
|
e.addElemName(0x10, name)
|
||||||
|
e.addInt32(int32(u))
|
||||||
|
} else {
|
||||||
|
e.addElemName(0x12, name)
|
||||||
|
e.addInt64(int64(u))
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
switch v.Type() {
|
||||||
|
case typeMongoTimestamp:
|
||||||
|
e.addElemName(0x11, name)
|
||||||
|
e.addInt64(v.Int())
|
||||||
|
|
||||||
|
case typeOrderKey:
|
||||||
|
if v.Int() == int64(MaxKey) {
|
||||||
|
e.addElemName(0x7F, name)
|
||||||
|
} else {
|
||||||
|
e.addElemName(0xFF, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
i := v.Int()
|
||||||
|
if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
|
||||||
|
// It fits into an int32, encode as such.
|
||||||
|
e.addElemName(0x10, name)
|
||||||
|
e.addInt32(int32(i))
|
||||||
|
} else {
|
||||||
|
e.addElemName(0x12, name)
|
||||||
|
e.addInt64(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Bool:
|
||||||
|
e.addElemName(0x08, name)
|
||||||
|
if v.Bool() {
|
||||||
|
e.addBytes(1)
|
||||||
|
} else {
|
||||||
|
e.addBytes(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Map:
|
||||||
|
e.addElemName(0x03, name)
|
||||||
|
e.addDoc(v)
|
||||||
|
|
||||||
|
case reflect.Slice:
|
||||||
|
vt := v.Type()
|
||||||
|
et := vt.Elem()
|
||||||
|
if et.Kind() == reflect.Uint8 {
|
||||||
|
e.addElemName(0x05, name)
|
||||||
|
e.addBinary(0x00, v.Bytes())
|
||||||
|
} else if et == typeDocElem || et == typeRawDocElem {
|
||||||
|
e.addElemName(0x03, name)
|
||||||
|
e.addDoc(v)
|
||||||
|
} else {
|
||||||
|
e.addElemName(0x04, name)
|
||||||
|
e.addDoc(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Array:
|
||||||
|
et := v.Type().Elem()
|
||||||
|
if et.Kind() == reflect.Uint8 {
|
||||||
|
e.addElemName(0x05, name)
|
||||||
|
if v.CanAddr() {
|
||||||
|
e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte))
|
||||||
|
} else {
|
||||||
|
n := v.Len()
|
||||||
|
e.addInt32(int32(n))
|
||||||
|
e.addBytes(0x00)
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
el := v.Index(i)
|
||||||
|
e.addBytes(byte(el.Uint()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
e.addElemName(0x04, name)
|
||||||
|
e.addDoc(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Struct:
|
||||||
|
switch s := v.Interface().(type) {
|
||||||
|
|
||||||
|
case Raw:
|
||||||
|
kind := s.Kind
|
||||||
|
if kind == 0x00 {
|
||||||
|
kind = 0x03
|
||||||
|
}
|
||||||
|
if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
|
||||||
|
panic("Attempted to marshal empty Raw document")
|
||||||
|
}
|
||||||
|
e.addElemName(kind, name)
|
||||||
|
e.addBytes(s.Data...)
|
||||||
|
|
||||||
|
case Binary:
|
||||||
|
e.addElemName(0x05, name)
|
||||||
|
e.addBinary(s.Kind, s.Data)
|
||||||
|
|
||||||
|
case Decimal128:
|
||||||
|
e.addElemName(0x13, name)
|
||||||
|
e.addInt64(int64(s.l))
|
||||||
|
e.addInt64(int64(s.h))
|
||||||
|
|
||||||
|
case DBPointer:
|
||||||
|
e.addElemName(0x0C, name)
|
||||||
|
e.addStr(s.Namespace)
|
||||||
|
if len(s.Id) != 12 {
|
||||||
|
panic("ObjectIDs must be exactly 12 bytes long (got " +
|
||||||
|
strconv.Itoa(len(s.Id)) + ")")
|
||||||
|
}
|
||||||
|
e.addBytes([]byte(s.Id)...)
|
||||||
|
|
||||||
|
case RegEx:
|
||||||
|
e.addElemName(0x0B, name)
|
||||||
|
e.addCStr(s.Pattern)
|
||||||
|
e.addCStr(s.Options)
|
||||||
|
|
||||||
|
case JavaScript:
|
||||||
|
if s.Scope == nil {
|
||||||
|
e.addElemName(0x0D, name)
|
||||||
|
e.addStr(s.Code)
|
||||||
|
} else {
|
||||||
|
e.addElemName(0x0F, name)
|
||||||
|
start := e.reserveInt32()
|
||||||
|
e.addStr(s.Code)
|
||||||
|
e.addDoc(reflect.ValueOf(s.Scope))
|
||||||
|
e.setInt32(start, int32(len(e.out)-start))
|
||||||
|
}
|
||||||
|
|
||||||
|
case time.Time:
|
||||||
|
// MongoDB handles timestamps as milliseconds.
|
||||||
|
e.addElemName(0x09, name)
|
||||||
|
e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
|
||||||
|
|
||||||
|
case url.URL:
|
||||||
|
e.addElemName(0x02, name)
|
||||||
|
e.addStr(s.String())
|
||||||
|
|
||||||
|
case undefined:
|
||||||
|
e.addElemName(0x06, name)
|
||||||
|
|
||||||
|
default:
|
||||||
|
e.addElemName(0x03, name)
|
||||||
|
e.addDoc(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
panic("Can't marshal " + v.Type().String() + " in a BSON document")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
|
||||||
|
// Marshaling of base types.
|
||||||
|
|
||||||
|
func (e *encoder) addBinary(subtype byte, v []byte) {
|
||||||
|
if subtype == 0x02 {
|
||||||
|
// Wonder how that brilliant idea came to life. Obsolete, luckily.
|
||||||
|
e.addInt32(int32(len(v) + 4))
|
||||||
|
e.addBytes(subtype)
|
||||||
|
e.addInt32(int32(len(v)))
|
||||||
|
} else {
|
||||||
|
e.addInt32(int32(len(v)))
|
||||||
|
e.addBytes(subtype)
|
||||||
|
}
|
||||||
|
e.addBytes(v...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) addStr(v string) {
|
||||||
|
e.addInt32(int32(len(v) + 1))
|
||||||
|
e.addCStr(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) addCStr(v string) {
|
||||||
|
e.addBytes([]byte(v)...)
|
||||||
|
e.addBytes(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) reserveInt32() (pos int) {
|
||||||
|
pos = len(e.out)
|
||||||
|
e.addBytes(0, 0, 0, 0)
|
||||||
|
return pos
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) setInt32(pos int, v int32) {
|
||||||
|
e.out[pos+0] = byte(v)
|
||||||
|
e.out[pos+1] = byte(v >> 8)
|
||||||
|
e.out[pos+2] = byte(v >> 16)
|
||||||
|
e.out[pos+3] = byte(v >> 24)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) addInt32(v int32) {
|
||||||
|
u := uint32(v)
|
||||||
|
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) addInt64(v int64) {
|
||||||
|
u := uint64(v)
|
||||||
|
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
|
||||||
|
byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) addFloat64(v float64) {
|
||||||
|
e.addInt64(int64(math.Float64bits(v)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) addBytes(v ...byte) {
|
||||||
|
e.out = append(e.out, v...)
|
||||||
|
}
|
|
@ -0,0 +1,380 @@
|
||||||
|
package bson
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
"gopkg.in/mgo.v2/internal/json"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UnmarshalJSON unmarshals a JSON value that may hold non-standard
|
||||||
|
// syntax as defined in BSON's extended JSON specification.
|
||||||
|
func UnmarshalJSON(data []byte, value interface{}) error {
|
||||||
|
d := json.NewDecoder(bytes.NewBuffer(data))
|
||||||
|
d.Extend(&jsonExt)
|
||||||
|
return d.Decode(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON marshals a JSON value that may hold non-standard
|
||||||
|
// syntax as defined in BSON's extended JSON specification.
|
||||||
|
func MarshalJSON(value interface{}) ([]byte, error) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
e := json.NewEncoder(&buf)
|
||||||
|
e.Extend(&jsonExt)
|
||||||
|
err := e.Encode(value)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// jdec is used internally by the JSON decoding functions
|
||||||
|
// so they may unmarshal functions without getting into endless
|
||||||
|
// recursion due to keyed objects.
|
||||||
|
func jdec(data []byte, value interface{}) error {
|
||||||
|
d := json.NewDecoder(bytes.NewBuffer(data))
|
||||||
|
d.Extend(&funcExt)
|
||||||
|
return d.Decode(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
var jsonExt json.Extension
|
||||||
|
var funcExt json.Extension
|
||||||
|
|
||||||
|
// TODO
|
||||||
|
// - Shell regular expressions ("/regexp/opts")
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
jsonExt.DecodeUnquotedKeys(true)
|
||||||
|
jsonExt.DecodeTrailingCommas(true)
|
||||||
|
|
||||||
|
funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary")
|
||||||
|
jsonExt.DecodeKeyed("$binary", jdecBinary)
|
||||||
|
jsonExt.DecodeKeyed("$binaryFunc", jdecBinary)
|
||||||
|
jsonExt.EncodeType([]byte(nil), jencBinarySlice)
|
||||||
|
jsonExt.EncodeType(Binary{}, jencBinaryType)
|
||||||
|
|
||||||
|
funcExt.DecodeFunc("ISODate", "$dateFunc", "S")
|
||||||
|
funcExt.DecodeFunc("new Date", "$dateFunc", "S")
|
||||||
|
jsonExt.DecodeKeyed("$date", jdecDate)
|
||||||
|
jsonExt.DecodeKeyed("$dateFunc", jdecDate)
|
||||||
|
jsonExt.EncodeType(time.Time{}, jencDate)
|
||||||
|
|
||||||
|
funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i")
|
||||||
|
jsonExt.DecodeKeyed("$timestamp", jdecTimestamp)
|
||||||
|
jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp)
|
||||||
|
|
||||||
|
funcExt.DecodeConst("undefined", Undefined)
|
||||||
|
|
||||||
|
jsonExt.DecodeKeyed("$regex", jdecRegEx)
|
||||||
|
jsonExt.EncodeType(RegEx{}, jencRegEx)
|
||||||
|
|
||||||
|
funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id")
|
||||||
|
jsonExt.DecodeKeyed("$oid", jdecObjectId)
|
||||||
|
jsonExt.DecodeKeyed("$oidFunc", jdecObjectId)
|
||||||
|
jsonExt.EncodeType(ObjectId(""), jencObjectId)
|
||||||
|
|
||||||
|
funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id")
|
||||||
|
jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef)
|
||||||
|
|
||||||
|
funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N")
|
||||||
|
jsonExt.DecodeKeyed("$numberLong", jdecNumberLong)
|
||||||
|
jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong)
|
||||||
|
jsonExt.EncodeType(int64(0), jencNumberLong)
|
||||||
|
jsonExt.EncodeType(int(0), jencInt)
|
||||||
|
|
||||||
|
funcExt.DecodeConst("MinKey", MinKey)
|
||||||
|
funcExt.DecodeConst("MaxKey", MaxKey)
|
||||||
|
jsonExt.DecodeKeyed("$minKey", jdecMinKey)
|
||||||
|
jsonExt.DecodeKeyed("$maxKey", jdecMaxKey)
|
||||||
|
jsonExt.EncodeType(orderKey(0), jencMinMaxKey)
|
||||||
|
|
||||||
|
jsonExt.DecodeKeyed("$undefined", jdecUndefined)
|
||||||
|
jsonExt.EncodeType(Undefined, jencUndefined)
|
||||||
|
|
||||||
|
jsonExt.Extend(&funcExt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func fbytes(format string, args ...interface{}) []byte {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
fmt.Fprintf(&buf, format, args...)
|
||||||
|
return buf.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
func jdecBinary(data []byte) (interface{}, error) {
|
||||||
|
var v struct {
|
||||||
|
Binary []byte `json:"$binary"`
|
||||||
|
Type string `json:"$type"`
|
||||||
|
Func struct {
|
||||||
|
Binary []byte `json:"$binary"`
|
||||||
|
Type int64 `json:"$type"`
|
||||||
|
} `json:"$binaryFunc"`
|
||||||
|
}
|
||||||
|
err := jdec(data, &v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var binData []byte
|
||||||
|
var binKind int64
|
||||||
|
if v.Type == "" && v.Binary == nil {
|
||||||
|
binData = v.Func.Binary
|
||||||
|
binKind = v.Func.Type
|
||||||
|
} else if v.Type == "" {
|
||||||
|
return v.Binary, nil
|
||||||
|
} else {
|
||||||
|
binData = v.Binary
|
||||||
|
binKind, err = strconv.ParseInt(v.Type, 0, 64)
|
||||||
|
if err != nil {
|
||||||
|
binKind = -1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if binKind == 0 {
|
||||||
|
return binData, nil
|
||||||
|
}
|
||||||
|
if binKind < 0 || binKind > 255 {
|
||||||
|
return nil, fmt.Errorf("invalid type in binary object: %s", data)
|
||||||
|
}
|
||||||
|
|
||||||
|
return Binary{Kind: byte(binKind), Data: binData}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jencBinarySlice(v interface{}) ([]byte, error) {
|
||||||
|
in := v.([]byte)
|
||||||
|
out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
|
||||||
|
base64.StdEncoding.Encode(out, in)
|
||||||
|
return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jencBinaryType(v interface{}) ([]byte, error) {
|
||||||
|
in := v.(Binary)
|
||||||
|
out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data)))
|
||||||
|
base64.StdEncoding.Encode(out, in.Data)
|
||||||
|
return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const jdateFormat = "2006-01-02T15:04:05.999Z"
|
||||||
|
|
||||||
|
func jdecDate(data []byte) (interface{}, error) {
|
||||||
|
var v struct {
|
||||||
|
S string `json:"$date"`
|
||||||
|
Func struct {
|
||||||
|
S string
|
||||||
|
} `json:"$dateFunc"`
|
||||||
|
}
|
||||||
|
_ = jdec(data, &v)
|
||||||
|
if v.S == "" {
|
||||||
|
v.S = v.Func.S
|
||||||
|
}
|
||||||
|
if v.S != "" {
|
||||||
|
for _, format := range []string{jdateFormat, "2006-01-02"} {
|
||||||
|
t, err := time.Parse(format, v.S)
|
||||||
|
if err == nil {
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("cannot parse date: %q", v.S)
|
||||||
|
}
|
||||||
|
|
||||||
|
var vn struct {
|
||||||
|
Date struct {
|
||||||
|
N int64 `json:"$numberLong,string"`
|
||||||
|
} `json:"$date"`
|
||||||
|
Func struct {
|
||||||
|
S int64
|
||||||
|
} `json:"$dateFunc"`
|
||||||
|
}
|
||||||
|
err := jdec(data, &vn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot parse date: %q", data)
|
||||||
|
}
|
||||||
|
n := vn.Date.N
|
||||||
|
if n == 0 {
|
||||||
|
n = vn.Func.S
|
||||||
|
}
|
||||||
|
return time.Unix(n/1000, n%1000*1e6).UTC(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jencDate(v interface{}) ([]byte, error) {
|
||||||
|
t := v.(time.Time)
|
||||||
|
return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jdecTimestamp(data []byte) (interface{}, error) {
|
||||||
|
var v struct {
|
||||||
|
Func struct {
|
||||||
|
T int32 `json:"t"`
|
||||||
|
I int32 `json:"i"`
|
||||||
|
} `json:"$timestamp"`
|
||||||
|
}
|
||||||
|
err := jdec(data, &v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jencTimestamp(v interface{}) ([]byte, error) {
|
||||||
|
ts := uint64(v.(MongoTimestamp))
|
||||||
|
return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jdecRegEx(data []byte) (interface{}, error) {
|
||||||
|
var v struct {
|
||||||
|
Regex string `json:"$regex"`
|
||||||
|
Options string `json:"$options"`
|
||||||
|
}
|
||||||
|
err := jdec(data, &v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return RegEx{v.Regex, v.Options}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jencRegEx(v interface{}) ([]byte, error) {
|
||||||
|
re := v.(RegEx)
|
||||||
|
type regex struct {
|
||||||
|
Regex string `json:"$regex"`
|
||||||
|
Options string `json:"$options"`
|
||||||
|
}
|
||||||
|
return json.Marshal(regex{re.Pattern, re.Options})
|
||||||
|
}
|
||||||
|
|
||||||
|
func jdecObjectId(data []byte) (interface{}, error) {
|
||||||
|
var v struct {
|
||||||
|
Id string `json:"$oid"`
|
||||||
|
Func struct {
|
||||||
|
Id string
|
||||||
|
} `json:"$oidFunc"`
|
||||||
|
}
|
||||||
|
err := jdec(data, &v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if v.Id == "" {
|
||||||
|
v.Id = v.Func.Id
|
||||||
|
}
|
||||||
|
return ObjectIdHex(v.Id), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jencObjectId(v interface{}) ([]byte, error) {
|
||||||
|
return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jdecDBRef(data []byte) (interface{}, error) {
|
||||||
|
// TODO Support unmarshaling $ref and $id into the input value.
|
||||||
|
var v struct {
|
||||||
|
Obj map[string]interface{} `json:"$dbrefFunc"`
|
||||||
|
}
|
||||||
|
// TODO Fix this. Must not be required.
|
||||||
|
v.Obj = make(map[string]interface{})
|
||||||
|
err := jdec(data, &v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return v.Obj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jdecNumberLong(data []byte) (interface{}, error) {
|
||||||
|
var v struct {
|
||||||
|
N int64 `json:"$numberLong,string"`
|
||||||
|
Func struct {
|
||||||
|
N int64 `json:",string"`
|
||||||
|
} `json:"$numberLongFunc"`
|
||||||
|
}
|
||||||
|
var vn struct {
|
||||||
|
N int64 `json:"$numberLong"`
|
||||||
|
Func struct {
|
||||||
|
N int64
|
||||||
|
} `json:"$numberLongFunc"`
|
||||||
|
}
|
||||||
|
err := jdec(data, &v)
|
||||||
|
if err != nil {
|
||||||
|
err = jdec(data, &vn)
|
||||||
|
v.N = vn.N
|
||||||
|
v.Func.N = vn.Func.N
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if v.N != 0 {
|
||||||
|
return v.N, nil
|
||||||
|
}
|
||||||
|
return v.Func.N, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jencNumberLong(v interface{}) ([]byte, error) {
|
||||||
|
n := v.(int64)
|
||||||
|
f := `{"$numberLong":"%d"}`
|
||||||
|
if n <= 1<<53 {
|
||||||
|
f = `{"$numberLong":%d}`
|
||||||
|
}
|
||||||
|
return fbytes(f, n), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jencInt(v interface{}) ([]byte, error) {
|
||||||
|
n := v.(int)
|
||||||
|
f := `{"$numberLong":"%d"}`
|
||||||
|
if int64(n) <= 1<<53 {
|
||||||
|
f = `%d`
|
||||||
|
}
|
||||||
|
return fbytes(f, n), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jdecMinKey(data []byte) (interface{}, error) {
|
||||||
|
var v struct {
|
||||||
|
N int64 `json:"$minKey"`
|
||||||
|
}
|
||||||
|
err := jdec(data, &v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if v.N != 1 {
|
||||||
|
return nil, fmt.Errorf("invalid $minKey object: %s", data)
|
||||||
|
}
|
||||||
|
return MinKey, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jdecMaxKey(data []byte) (interface{}, error) {
|
||||||
|
var v struct {
|
||||||
|
N int64 `json:"$maxKey"`
|
||||||
|
}
|
||||||
|
err := jdec(data, &v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if v.N != 1 {
|
||||||
|
return nil, fmt.Errorf("invalid $maxKey object: %s", data)
|
||||||
|
}
|
||||||
|
return MaxKey, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jencMinMaxKey(v interface{}) ([]byte, error) {
|
||||||
|
switch v.(orderKey) {
|
||||||
|
case MinKey:
|
||||||
|
return []byte(`{"$minKey":1}`), nil
|
||||||
|
case MaxKey:
|
||||||
|
return []byte(`{"$maxKey":1}`), nil
|
||||||
|
}
|
||||||
|
panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v))
|
||||||
|
}
|
||||||
|
|
||||||
|
func jdecUndefined(data []byte) (interface{}, error) {
|
||||||
|
var v struct {
|
||||||
|
B bool `json:"$undefined"`
|
||||||
|
}
|
||||||
|
err := jdec(data, &v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if !v.B {
|
||||||
|
return nil, fmt.Errorf("invalid $undefined object: %s", data)
|
||||||
|
}
|
||||||
|
return Undefined, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jencUndefined(v interface{}) ([]byte, error) {
|
||||||
|
return []byte(`{"$undefined":true}`), nil
|
||||||
|
}
|
|
@ -0,0 +1,27 @@
|
||||||
|
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,95 @@
|
||||||
|
package json
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Extension holds a set of additional rules to be used when unmarshaling
|
||||||
|
// strict JSON or JSON-like content.
|
||||||
|
type Extension struct {
|
||||||
|
funcs map[string]funcExt
|
||||||
|
consts map[string]interface{}
|
||||||
|
keyed map[string]func([]byte) (interface{}, error)
|
||||||
|
encode map[reflect.Type]func(v interface{}) ([]byte, error)
|
||||||
|
|
||||||
|
unquotedKeys bool
|
||||||
|
trailingCommas bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type funcExt struct {
|
||||||
|
key string
|
||||||
|
args []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extend changes the decoder behavior to consider the provided extension.
|
||||||
|
func (dec *Decoder) Extend(ext *Extension) { dec.d.ext = *ext }
|
||||||
|
|
||||||
|
// Extend changes the encoder behavior to consider the provided extension.
|
||||||
|
func (enc *Encoder) Extend(ext *Extension) { enc.ext = *ext }
|
||||||
|
|
||||||
|
// Extend includes in e the extensions defined in ext.
|
||||||
|
func (e *Extension) Extend(ext *Extension) {
|
||||||
|
for name, fext := range ext.funcs {
|
||||||
|
e.DecodeFunc(name, fext.key, fext.args...)
|
||||||
|
}
|
||||||
|
for name, value := range ext.consts {
|
||||||
|
e.DecodeConst(name, value)
|
||||||
|
}
|
||||||
|
for key, decode := range ext.keyed {
|
||||||
|
e.DecodeKeyed(key, decode)
|
||||||
|
}
|
||||||
|
for typ, encode := range ext.encode {
|
||||||
|
if e.encode == nil {
|
||||||
|
e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
|
||||||
|
}
|
||||||
|
e.encode[typ] = encode
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeFunc defines a function call that may be observed inside JSON content.
|
||||||
|
// A function with the provided name will be unmarshaled as the document
|
||||||
|
// {key: {args[0]: ..., args[N]: ...}}.
|
||||||
|
func (e *Extension) DecodeFunc(name string, key string, args ...string) {
|
||||||
|
if e.funcs == nil {
|
||||||
|
e.funcs = make(map[string]funcExt)
|
||||||
|
}
|
||||||
|
e.funcs[name] = funcExt{key, args}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeConst defines a constant name that may be observed inside JSON content
|
||||||
|
// and will be decoded with the provided value.
|
||||||
|
func (e *Extension) DecodeConst(name string, value interface{}) {
|
||||||
|
if e.consts == nil {
|
||||||
|
e.consts = make(map[string]interface{})
|
||||||
|
}
|
||||||
|
e.consts[name] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeKeyed defines a key that when observed as the first element inside a
|
||||||
|
// JSON document triggers the decoding of that document via the provided
|
||||||
|
// decode function.
|
||||||
|
func (e *Extension) DecodeKeyed(key string, decode func(data []byte) (interface{}, error)) {
|
||||||
|
if e.keyed == nil {
|
||||||
|
e.keyed = make(map[string]func([]byte) (interface{}, error))
|
||||||
|
}
|
||||||
|
e.keyed[key] = decode
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeUnquotedKeys defines whether to accept map keys that are unquoted strings.
|
||||||
|
func (e *Extension) DecodeUnquotedKeys(accept bool) {
|
||||||
|
e.unquotedKeys = accept
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeTrailingCommas defines whether to accept trailing commas in maps and arrays.
|
||||||
|
func (e *Extension) DecodeTrailingCommas(accept bool) {
|
||||||
|
e.trailingCommas = accept
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeType registers a function to encode values with the same type of the
|
||||||
|
// provided sample.
|
||||||
|
func (e *Extension) EncodeType(sample interface{}, encode func(v interface{}) ([]byte, error)) {
|
||||||
|
if e.encode == nil {
|
||||||
|
e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
|
||||||
|
}
|
||||||
|
e.encode[reflect.TypeOf(sample)] = encode
|
||||||
|
}
|
|
@ -0,0 +1,143 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package json
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
|
||||||
|
kelvin = '\u212a'
|
||||||
|
smallLongEss = '\u017f'
|
||||||
|
)
|
||||||
|
|
||||||
|
// foldFunc returns one of four different case folding equivalence
|
||||||
|
// functions, from most general (and slow) to fastest:
|
||||||
|
//
|
||||||
|
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
|
||||||
|
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
|
||||||
|
// 3) asciiEqualFold, no special, but includes non-letters (including _)
|
||||||
|
// 4) simpleLetterEqualFold, no specials, no non-letters.
|
||||||
|
//
|
||||||
|
// The letters S and K are special because they map to 3 runes, not just 2:
|
||||||
|
// * S maps to s and to U+017F 'ſ' Latin small letter long s
|
||||||
|
// * k maps to K and to U+212A 'K' Kelvin sign
|
||||||
|
// See https://play.golang.org/p/tTxjOc0OGo
|
||||||
|
//
|
||||||
|
// The returned function is specialized for matching against s and
|
||||||
|
// should only be given s. It's not curried for performance reasons.
|
||||||
|
func foldFunc(s []byte) func(s, t []byte) bool {
|
||||||
|
nonLetter := false
|
||||||
|
special := false // special letter
|
||||||
|
for _, b := range s {
|
||||||
|
if b >= utf8.RuneSelf {
|
||||||
|
return bytes.EqualFold
|
||||||
|
}
|
||||||
|
upper := b & caseMask
|
||||||
|
if upper < 'A' || upper > 'Z' {
|
||||||
|
nonLetter = true
|
||||||
|
} else if upper == 'K' || upper == 'S' {
|
||||||
|
// See above for why these letters are special.
|
||||||
|
special = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if special {
|
||||||
|
return equalFoldRight
|
||||||
|
}
|
||||||
|
if nonLetter {
|
||||||
|
return asciiEqualFold
|
||||||
|
}
|
||||||
|
return simpleLetterEqualFold
|
||||||
|
}
|
||||||
|
|
||||||
|
// equalFoldRight is a specialization of bytes.EqualFold when s is
|
||||||
|
// known to be all ASCII (including punctuation), but contains an 's',
|
||||||
|
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
|
||||||
|
// See comments on foldFunc.
|
||||||
|
func equalFoldRight(s, t []byte) bool {
|
||||||
|
for _, sb := range s {
|
||||||
|
if len(t) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
tb := t[0]
|
||||||
|
if tb < utf8.RuneSelf {
|
||||||
|
if sb != tb {
|
||||||
|
sbUpper := sb & caseMask
|
||||||
|
if 'A' <= sbUpper && sbUpper <= 'Z' {
|
||||||
|
if sbUpper != tb&caseMask {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t = t[1:]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// sb is ASCII and t is not. t must be either kelvin
|
||||||
|
// sign or long s; sb must be s, S, k, or K.
|
||||||
|
tr, size := utf8.DecodeRune(t)
|
||||||
|
switch sb {
|
||||||
|
case 's', 'S':
|
||||||
|
if tr != smallLongEss {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
case 'k', 'K':
|
||||||
|
if tr != kelvin {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
t = t[size:]
|
||||||
|
|
||||||
|
}
|
||||||
|
if len(t) > 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// asciiEqualFold is a specialization of bytes.EqualFold for use when
|
||||||
|
// s is all ASCII (but may contain non-letters) and contains no
|
||||||
|
// special-folding letters.
|
||||||
|
// See comments on foldFunc.
|
||||||
|
func asciiEqualFold(s, t []byte) bool {
|
||||||
|
if len(s) != len(t) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i, sb := range s {
|
||||||
|
tb := t[i]
|
||||||
|
if sb == tb {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
|
||||||
|
if sb&caseMask != tb&caseMask {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
|
||||||
|
// use when s is all ASCII letters (no underscores, etc) and also
|
||||||
|
// doesn't contain 'k', 'K', 's', or 'S'.
|
||||||
|
// See comments on foldFunc.
|
||||||
|
func simpleLetterEqualFold(s, t []byte) bool {
|
||||||
|
if len(s) != len(t) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i, b := range s {
|
||||||
|
if b&caseMask != t[i]&caseMask {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
|
@ -0,0 +1,141 @@
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package json
|
||||||
|
|
||||||
|
import "bytes"
|
||||||
|
|
||||||
|
// Compact appends to dst the JSON-encoded src with
|
||||||
|
// insignificant space characters elided.
|
||||||
|
func Compact(dst *bytes.Buffer, src []byte) error {
|
||||||
|
return compact(dst, src, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
|
||||||
|
origLen := dst.Len()
|
||||||
|
var scan scanner
|
||||||
|
scan.reset()
|
||||||
|
start := 0
|
||||||
|
for i, c := range src {
|
||||||
|
if escape && (c == '<' || c == '>' || c == '&') {
|
||||||
|
if start < i {
|
||||||
|
dst.Write(src[start:i])
|
||||||
|
}
|
||||||
|
dst.WriteString(`\u00`)
|
||||||
|
dst.WriteByte(hex[c>>4])
|
||||||
|
dst.WriteByte(hex[c&0xF])
|
||||||
|
start = i + 1
|
||||||
|
}
|
||||||
|
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
|
||||||
|
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
|
||||||
|
if start < i {
|
||||||
|
dst.Write(src[start:i])
|
||||||
|
}
|
||||||
|
dst.WriteString(`\u202`)
|
||||||
|
dst.WriteByte(hex[src[i+2]&0xF])
|
||||||
|
start = i + 3
|
||||||
|
}
|
||||||
|
v := scan.step(&scan, c)
|
||||||
|
if v >= scanSkipSpace {
|
||||||
|
if v == scanError {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if start < i {
|
||||||
|
dst.Write(src[start:i])
|
||||||
|
}
|
||||||
|
start = i + 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if scan.eof() == scanError {
|
||||||
|
dst.Truncate(origLen)
|
||||||
|
return scan.err
|
||||||
|
}
|
||||||
|
if start < len(src) {
|
||||||
|
dst.Write(src[start:])
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
|
||||||
|
dst.WriteByte('\n')
|
||||||
|
dst.WriteString(prefix)
|
||||||
|
for i := 0; i < depth; i++ {
|
||||||
|
dst.WriteString(indent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Indent appends to dst an indented form of the JSON-encoded src.
|
||||||
|
// Each element in a JSON object or array begins on a new,
|
||||||
|
// indented line beginning with prefix followed by one or more
|
||||||
|
// copies of indent according to the indentation nesting.
|
||||||
|
// The data appended to dst does not begin with the prefix nor
|
||||||
|
// any indentation, to make it easier to embed inside other formatted JSON data.
|
||||||
|
// Although leading space characters (space, tab, carriage return, newline)
|
||||||
|
// at the beginning of src are dropped, trailing space characters
|
||||||
|
// at the end of src are preserved and copied to dst.
|
||||||
|
// For example, if src has no trailing spaces, neither will dst;
|
||||||
|
// if src ends in a trailing newline, so will dst.
|
||||||
|
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
|
||||||
|
origLen := dst.Len()
|
||||||
|
var scan scanner
|
||||||
|
scan.reset()
|
||||||
|
needIndent := false
|
||||||
|
depth := 0
|
||||||
|
for _, c := range src {
|
||||||
|
scan.bytes++
|
||||||
|
v := scan.step(&scan, c)
|
||||||
|
if v == scanSkipSpace {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if v == scanError {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if needIndent && v != scanEndObject && v != scanEndArray {
|
||||||
|
needIndent = false
|
||||||
|
depth++
|
||||||
|
newline(dst, prefix, indent, depth)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Emit semantically uninteresting bytes
|
||||||
|
// (in particular, punctuation in strings) unmodified.
|
||||||
|
if v == scanContinue {
|
||||||
|
dst.WriteByte(c)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add spacing around real punctuation.
|
||||||
|
switch c {
|
||||||
|
case '{', '[':
|
||||||
|
// delay indent so that empty object and array are formatted as {} and [].
|
||||||
|
needIndent = true
|
||||||
|
dst.WriteByte(c)
|
||||||
|
|
||||||
|
case ',':
|
||||||
|
dst.WriteByte(c)
|
||||||
|
newline(dst, prefix, indent, depth)
|
||||||
|
|
||||||
|
case ':':
|
||||||
|
dst.WriteByte(c)
|
||||||
|
dst.WriteByte(' ')
|
||||||
|
|
||||||
|
case '}', ']':
|
||||||
|
if needIndent {
|
||||||
|
// suppress indent in empty object/array
|
||||||
|
needIndent = false
|
||||||
|
} else {
|
||||||
|
depth--
|
||||||
|
newline(dst, prefix, indent, depth)
|
||||||
|
}
|
||||||
|
dst.WriteByte(c)
|
||||||
|
|
||||||
|
default:
|
||||||
|
dst.WriteByte(c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if scan.eof() == scanError {
|
||||||
|
dst.Truncate(origLen)
|
||||||
|
return scan.err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,697 @@
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package json
|
||||||
|
|
||||||
|
// JSON value parser state machine.
|
||||||
|
// Just about at the limit of what is reasonable to write by hand.
|
||||||
|
// Some parts are a bit tedious, but overall it nicely factors out the
|
||||||
|
// otherwise common code from the multiple scanning functions
|
||||||
|
// in this package (Compact, Indent, checkValid, nextValue, etc).
|
||||||
|
//
|
||||||
|
// This file starts with two simple examples using the scanner
|
||||||
|
// before diving into the scanner itself.
|
||||||
|
|
||||||
|
import "strconv"
|
||||||
|
|
||||||
|
// checkValid verifies that data is valid JSON-encoded data.
|
||||||
|
// scan is passed in for use by checkValid to avoid an allocation.
|
||||||
|
func checkValid(data []byte, scan *scanner) error {
|
||||||
|
scan.reset()
|
||||||
|
for _, c := range data {
|
||||||
|
scan.bytes++
|
||||||
|
if scan.step(scan, c) == scanError {
|
||||||
|
return scan.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if scan.eof() == scanError {
|
||||||
|
return scan.err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// nextValue splits data after the next whole JSON value,
|
||||||
|
// returning that value and the bytes that follow it as separate slices.
|
||||||
|
// scan is passed in for use by nextValue to avoid an allocation.
|
||||||
|
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
|
||||||
|
scan.reset()
|
||||||
|
for i, c := range data {
|
||||||
|
v := scan.step(scan, c)
|
||||||
|
if v >= scanEndObject {
|
||||||
|
switch v {
|
||||||
|
// probe the scanner with a space to determine whether we will
|
||||||
|
// get scanEnd on the next character. Otherwise, if the next character
|
||||||
|
// is not a space, scanEndTop allocates a needless error.
|
||||||
|
case scanEndObject, scanEndArray, scanEndParams:
|
||||||
|
if scan.step(scan, ' ') == scanEnd {
|
||||||
|
return data[:i+1], data[i+1:], nil
|
||||||
|
}
|
||||||
|
case scanError:
|
||||||
|
return nil, nil, scan.err
|
||||||
|
case scanEnd:
|
||||||
|
return data[:i], data[i:], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if scan.eof() == scanError {
|
||||||
|
return nil, nil, scan.err
|
||||||
|
}
|
||||||
|
return data, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// A SyntaxError is a description of a JSON syntax error.
|
||||||
|
type SyntaxError struct {
|
||||||
|
msg string // description of error
|
||||||
|
Offset int64 // error occurred after reading Offset bytes
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *SyntaxError) Error() string { return e.msg }
|
||||||
|
|
||||||
|
// A scanner is a JSON scanning state machine.
|
||||||
|
// Callers call scan.reset() and then pass bytes in one at a time
|
||||||
|
// by calling scan.step(&scan, c) for each byte.
|
||||||
|
// The return value, referred to as an opcode, tells the
|
||||||
|
// caller about significant parsing events like beginning
|
||||||
|
// and ending literals, objects, and arrays, so that the
|
||||||
|
// caller can follow along if it wishes.
|
||||||
|
// The return value scanEnd indicates that a single top-level
|
||||||
|
// JSON value has been completed, *before* the byte that
|
||||||
|
// just got passed in. (The indication must be delayed in order
|
||||||
|
// to recognize the end of numbers: is 123 a whole value or
|
||||||
|
// the beginning of 12345e+6?).
|
||||||
|
type scanner struct {
|
||||||
|
// The step is a func to be called to execute the next transition.
|
||||||
|
// Also tried using an integer constant and a single func
|
||||||
|
// with a switch, but using the func directly was 10% faster
|
||||||
|
// on a 64-bit Mac Mini, and it's nicer to read.
|
||||||
|
step func(*scanner, byte) int
|
||||||
|
|
||||||
|
// Reached end of top-level value.
|
||||||
|
endTop bool
|
||||||
|
|
||||||
|
// Stack of what we're in the middle of - array values, object keys, object values.
|
||||||
|
parseState []int
|
||||||
|
|
||||||
|
// Error that happened, if any.
|
||||||
|
err error
|
||||||
|
|
||||||
|
// 1-byte redo (see undo method)
|
||||||
|
redo bool
|
||||||
|
redoCode int
|
||||||
|
redoState func(*scanner, byte) int
|
||||||
|
|
||||||
|
// total bytes consumed, updated by decoder.Decode
|
||||||
|
bytes int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// These values are returned by the state transition functions
|
||||||
|
// assigned to scanner.state and the method scanner.eof.
|
||||||
|
// They give details about the current state of the scan that
|
||||||
|
// callers might be interested to know about.
|
||||||
|
// It is okay to ignore the return value of any particular
|
||||||
|
// call to scanner.state: if one call returns scanError,
|
||||||
|
// every subsequent call will return scanError too.
|
||||||
|
const (
|
||||||
|
// Continue.
|
||||||
|
scanContinue = iota // uninteresting byte
|
||||||
|
scanBeginLiteral // end implied by next result != scanContinue
|
||||||
|
scanBeginObject // begin object
|
||||||
|
scanObjectKey // just finished object key (string)
|
||||||
|
scanObjectValue // just finished non-last object value
|
||||||
|
scanEndObject // end object (implies scanObjectValue if possible)
|
||||||
|
scanBeginArray // begin array
|
||||||
|
scanArrayValue // just finished array value
|
||||||
|
scanEndArray // end array (implies scanArrayValue if possible)
|
||||||
|
scanBeginName // begin function call
|
||||||
|
scanParam // begin function argument
|
||||||
|
scanEndParams // end function call
|
||||||
|
scanSkipSpace // space byte; can skip; known to be last "continue" result
|
||||||
|
|
||||||
|
// Stop.
|
||||||
|
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
|
||||||
|
scanError // hit an error, scanner.err.
|
||||||
|
)
|
||||||
|
|
||||||
|
// These values are stored in the parseState stack.
|
||||||
|
// They give the current state of a composite value
|
||||||
|
// being scanned. If the parser is inside a nested value
|
||||||
|
// the parseState describes the nested state, outermost at entry 0.
|
||||||
|
const (
|
||||||
|
parseObjectKey = iota // parsing object key (before colon)
|
||||||
|
parseObjectValue // parsing object value (after colon)
|
||||||
|
parseArrayValue // parsing array value
|
||||||
|
parseName // parsing unquoted name
|
||||||
|
parseParam // parsing function argument value
|
||||||
|
)
|
||||||
|
|
||||||
|
// reset prepares the scanner for use.
|
||||||
|
// It must be called before calling s.step.
|
||||||
|
func (s *scanner) reset() {
|
||||||
|
s.step = stateBeginValue
|
||||||
|
s.parseState = s.parseState[0:0]
|
||||||
|
s.err = nil
|
||||||
|
s.redo = false
|
||||||
|
s.endTop = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// eof tells the scanner that the end of input has been reached.
|
||||||
|
// It returns a scan status just as s.step does.
|
||||||
|
func (s *scanner) eof() int {
|
||||||
|
if s.err != nil {
|
||||||
|
return scanError
|
||||||
|
}
|
||||||
|
if s.endTop {
|
||||||
|
return scanEnd
|
||||||
|
}
|
||||||
|
s.step(s, ' ')
|
||||||
|
if s.endTop {
|
||||||
|
return scanEnd
|
||||||
|
}
|
||||||
|
if s.err == nil {
|
||||||
|
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
|
||||||
|
}
|
||||||
|
return scanError
|
||||||
|
}
|
||||||
|
|
||||||
|
// pushParseState pushes a new parse state p onto the parse stack.
|
||||||
|
func (s *scanner) pushParseState(p int) {
|
||||||
|
s.parseState = append(s.parseState, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// popParseState pops a parse state (already obtained) off the stack
|
||||||
|
// and updates s.step accordingly.
|
||||||
|
func (s *scanner) popParseState() {
|
||||||
|
n := len(s.parseState) - 1
|
||||||
|
s.parseState = s.parseState[0:n]
|
||||||
|
s.redo = false
|
||||||
|
if n == 0 {
|
||||||
|
s.step = stateEndTop
|
||||||
|
s.endTop = true
|
||||||
|
} else {
|
||||||
|
s.step = stateEndValue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSpace(c byte) bool {
|
||||||
|
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateBeginValueOrEmpty is the state after reading `[`.
|
||||||
|
func stateBeginValueOrEmpty(s *scanner, c byte) int {
|
||||||
|
if c <= ' ' && isSpace(c) {
|
||||||
|
return scanSkipSpace
|
||||||
|
}
|
||||||
|
if c == ']' {
|
||||||
|
return stateEndValue(s, c)
|
||||||
|
}
|
||||||
|
return stateBeginValue(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateBeginValue is the state at the beginning of the input.
|
||||||
|
func stateBeginValue(s *scanner, c byte) int {
|
||||||
|
if c <= ' ' && isSpace(c) {
|
||||||
|
return scanSkipSpace
|
||||||
|
}
|
||||||
|
switch c {
|
||||||
|
case '{':
|
||||||
|
s.step = stateBeginStringOrEmpty
|
||||||
|
s.pushParseState(parseObjectKey)
|
||||||
|
return scanBeginObject
|
||||||
|
case '[':
|
||||||
|
s.step = stateBeginValueOrEmpty
|
||||||
|
s.pushParseState(parseArrayValue)
|
||||||
|
return scanBeginArray
|
||||||
|
case '"':
|
||||||
|
s.step = stateInString
|
||||||
|
return scanBeginLiteral
|
||||||
|
case '-':
|
||||||
|
s.step = stateNeg
|
||||||
|
return scanBeginLiteral
|
||||||
|
case '0': // beginning of 0.123
|
||||||
|
s.step = state0
|
||||||
|
return scanBeginLiteral
|
||||||
|
case 'n':
|
||||||
|
s.step = stateNew0
|
||||||
|
return scanBeginName
|
||||||
|
}
|
||||||
|
if '1' <= c && c <= '9' { // beginning of 1234.5
|
||||||
|
s.step = state1
|
||||||
|
return scanBeginLiteral
|
||||||
|
}
|
||||||
|
if isName(c) {
|
||||||
|
s.step = stateName
|
||||||
|
return scanBeginName
|
||||||
|
}
|
||||||
|
return s.error(c, "looking for beginning of value")
|
||||||
|
}
|
||||||
|
|
||||||
|
func isName(c byte) bool {
|
||||||
|
return c == '$' || c == '_' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9'
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateBeginStringOrEmpty is the state after reading `{`.
|
||||||
|
func stateBeginStringOrEmpty(s *scanner, c byte) int {
|
||||||
|
if c <= ' ' && isSpace(c) {
|
||||||
|
return scanSkipSpace
|
||||||
|
}
|
||||||
|
if c == '}' {
|
||||||
|
n := len(s.parseState)
|
||||||
|
s.parseState[n-1] = parseObjectValue
|
||||||
|
return stateEndValue(s, c)
|
||||||
|
}
|
||||||
|
return stateBeginString(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateBeginString is the state after reading `{"key": value,`.
|
||||||
|
func stateBeginString(s *scanner, c byte) int {
|
||||||
|
if c <= ' ' && isSpace(c) {
|
||||||
|
return scanSkipSpace
|
||||||
|
}
|
||||||
|
if c == '"' {
|
||||||
|
s.step = stateInString
|
||||||
|
return scanBeginLiteral
|
||||||
|
}
|
||||||
|
if isName(c) {
|
||||||
|
s.step = stateName
|
||||||
|
return scanBeginName
|
||||||
|
}
|
||||||
|
return s.error(c, "looking for beginning of object key string")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateEndValue is the state after completing a value,
|
||||||
|
// such as after reading `{}` or `true` or `["x"`.
|
||||||
|
func stateEndValue(s *scanner, c byte) int {
|
||||||
|
n := len(s.parseState)
|
||||||
|
if n == 0 {
|
||||||
|
// Completed top-level before the current byte.
|
||||||
|
s.step = stateEndTop
|
||||||
|
s.endTop = true
|
||||||
|
return stateEndTop(s, c)
|
||||||
|
}
|
||||||
|
if c <= ' ' && isSpace(c) {
|
||||||
|
s.step = stateEndValue
|
||||||
|
return scanSkipSpace
|
||||||
|
}
|
||||||
|
ps := s.parseState[n-1]
|
||||||
|
switch ps {
|
||||||
|
case parseObjectKey:
|
||||||
|
if c == ':' {
|
||||||
|
s.parseState[n-1] = parseObjectValue
|
||||||
|
s.step = stateBeginValue
|
||||||
|
return scanObjectKey
|
||||||
|
}
|
||||||
|
return s.error(c, "after object key")
|
||||||
|
case parseObjectValue:
|
||||||
|
if c == ',' {
|
||||||
|
s.parseState[n-1] = parseObjectKey
|
||||||
|
s.step = stateBeginStringOrEmpty
|
||||||
|
return scanObjectValue
|
||||||
|
}
|
||||||
|
if c == '}' {
|
||||||
|
s.popParseState()
|
||||||
|
return scanEndObject
|
||||||
|
}
|
||||||
|
return s.error(c, "after object key:value pair")
|
||||||
|
case parseArrayValue:
|
||||||
|
if c == ',' {
|
||||||
|
s.step = stateBeginValueOrEmpty
|
||||||
|
return scanArrayValue
|
||||||
|
}
|
||||||
|
if c == ']' {
|
||||||
|
s.popParseState()
|
||||||
|
return scanEndArray
|
||||||
|
}
|
||||||
|
return s.error(c, "after array element")
|
||||||
|
case parseParam:
|
||||||
|
if c == ',' {
|
||||||
|
s.step = stateBeginValue
|
||||||
|
return scanParam
|
||||||
|
}
|
||||||
|
if c == ')' {
|
||||||
|
s.popParseState()
|
||||||
|
return scanEndParams
|
||||||
|
}
|
||||||
|
return s.error(c, "after array element")
|
||||||
|
}
|
||||||
|
return s.error(c, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateEndTop is the state after finishing the top-level value,
|
||||||
|
// such as after reading `{}` or `[1,2,3]`.
|
||||||
|
// Only space characters should be seen now.
|
||||||
|
func stateEndTop(s *scanner, c byte) int {
|
||||||
|
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
|
||||||
|
// Complain about non-space byte on next call.
|
||||||
|
s.error(c, "after top-level value")
|
||||||
|
}
|
||||||
|
return scanEnd
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateInString is the state after reading `"`.
|
||||||
|
func stateInString(s *scanner, c byte) int {
|
||||||
|
if c == '"' {
|
||||||
|
s.step = stateEndValue
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
if c == '\\' {
|
||||||
|
s.step = stateInStringEsc
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
if c < 0x20 {
|
||||||
|
return s.error(c, "in string literal")
|
||||||
|
}
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateInStringEsc is the state after reading `"\` during a quoted string.
|
||||||
|
func stateInStringEsc(s *scanner, c byte) int {
|
||||||
|
switch c {
|
||||||
|
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
|
||||||
|
s.step = stateInString
|
||||||
|
return scanContinue
|
||||||
|
case 'u':
|
||||||
|
s.step = stateInStringEscU
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in string escape code")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateInStringEscU is the state after reading `"\u` during a quoted string.
|
||||||
|
func stateInStringEscU(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||||
|
s.step = stateInStringEscU1
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
// numbers
|
||||||
|
return s.error(c, "in \\u hexadecimal character escape")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
|
||||||
|
func stateInStringEscU1(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||||
|
s.step = stateInStringEscU12
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
// numbers
|
||||||
|
return s.error(c, "in \\u hexadecimal character escape")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
|
||||||
|
func stateInStringEscU12(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||||
|
s.step = stateInStringEscU123
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
// numbers
|
||||||
|
return s.error(c, "in \\u hexadecimal character escape")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
|
||||||
|
func stateInStringEscU123(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||||
|
s.step = stateInString
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
// numbers
|
||||||
|
return s.error(c, "in \\u hexadecimal character escape")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateNeg is the state after reading `-` during a number.
|
||||||
|
func stateNeg(s *scanner, c byte) int {
|
||||||
|
if c == '0' {
|
||||||
|
s.step = state0
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
if '1' <= c && c <= '9' {
|
||||||
|
s.step = state1
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in numeric literal")
|
||||||
|
}
|
||||||
|
|
||||||
|
// state1 is the state after reading a non-zero integer during a number,
|
||||||
|
// such as after reading `1` or `100` but not `0`.
|
||||||
|
func state1(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' {
|
||||||
|
s.step = state1
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return state0(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// state0 is the state after reading `0` during a number.
|
||||||
|
func state0(s *scanner, c byte) int {
|
||||||
|
if c == '.' {
|
||||||
|
s.step = stateDot
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
if c == 'e' || c == 'E' {
|
||||||
|
s.step = stateE
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return stateEndValue(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateDot is the state after reading the integer and decimal point in a number,
|
||||||
|
// such as after reading `1.`.
|
||||||
|
func stateDot(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' {
|
||||||
|
s.step = stateDot0
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "after decimal point in numeric literal")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateDot0 is the state after reading the integer, decimal point, and subsequent
|
||||||
|
// digits of a number, such as after reading `3.14`.
|
||||||
|
func stateDot0(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' {
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
if c == 'e' || c == 'E' {
|
||||||
|
s.step = stateE
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return stateEndValue(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateE is the state after reading the mantissa and e in a number,
|
||||||
|
// such as after reading `314e` or `0.314e`.
|
||||||
|
func stateE(s *scanner, c byte) int {
|
||||||
|
if c == '+' || c == '-' {
|
||||||
|
s.step = stateESign
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return stateESign(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateESign is the state after reading the mantissa, e, and sign in a number,
|
||||||
|
// such as after reading `314e-` or `0.314e+`.
|
||||||
|
func stateESign(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' {
|
||||||
|
s.step = stateE0
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in exponent of numeric literal")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateE0 is the state after reading the mantissa, e, optional sign,
|
||||||
|
// and at least one digit of the exponent in a number,
|
||||||
|
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
|
||||||
|
func stateE0(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' {
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return stateEndValue(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateNew0 is the state after reading `n`.
|
||||||
|
func stateNew0(s *scanner, c byte) int {
|
||||||
|
if c == 'e' {
|
||||||
|
s.step = stateNew1
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
s.step = stateName
|
||||||
|
return stateName(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateNew1 is the state after reading `ne`.
|
||||||
|
func stateNew1(s *scanner, c byte) int {
|
||||||
|
if c == 'w' {
|
||||||
|
s.step = stateNew2
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
s.step = stateName
|
||||||
|
return stateName(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateNew2 is the state after reading `new`.
|
||||||
|
func stateNew2(s *scanner, c byte) int {
|
||||||
|
s.step = stateName
|
||||||
|
if c == ' ' {
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return stateName(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateName is the state while reading an unquoted function name.
|
||||||
|
func stateName(s *scanner, c byte) int {
|
||||||
|
if isName(c) {
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
if c == '(' {
|
||||||
|
s.step = stateParamOrEmpty
|
||||||
|
s.pushParseState(parseParam)
|
||||||
|
return scanParam
|
||||||
|
}
|
||||||
|
return stateEndValue(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateParamOrEmpty is the state after reading `(`.
|
||||||
|
func stateParamOrEmpty(s *scanner, c byte) int {
|
||||||
|
if c <= ' ' && isSpace(c) {
|
||||||
|
return scanSkipSpace
|
||||||
|
}
|
||||||
|
if c == ')' {
|
||||||
|
return stateEndValue(s, c)
|
||||||
|
}
|
||||||
|
return stateBeginValue(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateT is the state after reading `t`.
|
||||||
|
func stateT(s *scanner, c byte) int {
|
||||||
|
if c == 'r' {
|
||||||
|
s.step = stateTr
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal true (expecting 'r')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateTr is the state after reading `tr`.
|
||||||
|
func stateTr(s *scanner, c byte) int {
|
||||||
|
if c == 'u' {
|
||||||
|
s.step = stateTru
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal true (expecting 'u')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateTru is the state after reading `tru`.
|
||||||
|
func stateTru(s *scanner, c byte) int {
|
||||||
|
if c == 'e' {
|
||||||
|
s.step = stateEndValue
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal true (expecting 'e')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateF is the state after reading `f`.
|
||||||
|
func stateF(s *scanner, c byte) int {
|
||||||
|
if c == 'a' {
|
||||||
|
s.step = stateFa
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal false (expecting 'a')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateFa is the state after reading `fa`.
|
||||||
|
func stateFa(s *scanner, c byte) int {
|
||||||
|
if c == 'l' {
|
||||||
|
s.step = stateFal
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal false (expecting 'l')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateFal is the state after reading `fal`.
|
||||||
|
func stateFal(s *scanner, c byte) int {
|
||||||
|
if c == 's' {
|
||||||
|
s.step = stateFals
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal false (expecting 's')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateFals is the state after reading `fals`.
|
||||||
|
func stateFals(s *scanner, c byte) int {
|
||||||
|
if c == 'e' {
|
||||||
|
s.step = stateEndValue
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal false (expecting 'e')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateN is the state after reading `n`.
|
||||||
|
func stateN(s *scanner, c byte) int {
|
||||||
|
if c == 'u' {
|
||||||
|
s.step = stateNu
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal null (expecting 'u')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateNu is the state after reading `nu`.
|
||||||
|
func stateNu(s *scanner, c byte) int {
|
||||||
|
if c == 'l' {
|
||||||
|
s.step = stateNul
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal null (expecting 'l')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateNul is the state after reading `nul`.
|
||||||
|
func stateNul(s *scanner, c byte) int {
|
||||||
|
if c == 'l' {
|
||||||
|
s.step = stateEndValue
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal null (expecting 'l')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateError is the state after reaching a syntax error,
|
||||||
|
// such as after reading `[1}` or `5.1.2`.
|
||||||
|
func stateError(s *scanner, c byte) int {
|
||||||
|
return scanError
|
||||||
|
}
|
||||||
|
|
||||||
|
// error records an error and switches to the error state.
|
||||||
|
func (s *scanner) error(c byte, context string) int {
|
||||||
|
s.step = stateError
|
||||||
|
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
|
||||||
|
return scanError
|
||||||
|
}
|
||||||
|
|
||||||
|
// quoteChar formats c as a quoted character literal.
func quoteChar(c byte) string {
	// Special cases - different from quoted strings.
	switch c {
	case '\'':
		return `'\''`
	case '"':
		return `'"'`
	}

	// Use quoted string with different quotation marks.
	q := strconv.Quote(string(c))
	return "'" + q[1:len(q)-1] + "'"
}
|
||||||
|
|
||||||
|
// undo causes the scanner to return scanCode from the next state transition.
// This gives callers a simple 1-byte undo mechanism.
func (s *scanner) undo(scanCode int) {
	// Only one byte of undo is supported; a second undo before the
	// first is replayed is a caller bug.
	if s.redo {
		panic("json: invalid use of scanner")
	}
	// Save the pending result and current state, then park the
	// scanner in stateRedo until the next step call replays them.
	s.redoCode = scanCode
	s.redoState = s.step
	s.step = stateRedo
	s.redo = true
}

// stateRedo helps implement the scanner's 1-byte undo.
func stateRedo(s *scanner, c byte) int {
	// Restore the saved state and return the saved code; the byte c
	// is intentionally ignored (it is the byte being replayed).
	s.redo = false
	s.step = s.redoState
	return s.redoCode
}
|
|
@ -0,0 +1,510 @@
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package json
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Decoder reads and decodes JSON values from an input stream.
type Decoder struct {
	r     io.Reader   // source of JSON input
	buf   []byte      // buffered input read from r but not yet consumed
	d     decodeState // reusable decoding machinery for Decode
	scanp int         // start of unread data in buf
	scan  scanner     // syntax scanner used by readValue
	err   error       // sticky error; once set, all calls fail

	// Token-streaming state for the Token API.
	tokenState int   // current position in the JSON grammar
	tokenStack []int // saved states for enclosing arrays/objects
}
|
||||||
|
|
||||||
|
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
	// Zero values of buf, scan, and token state are ready to use.
	return &Decoder{r: r}
}

// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
|
||||||
|
|
||||||
|
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
	// A previous I/O or syntax error is sticky.
	if dec.err != nil {
		return dec.err
	}

	// If the Token API left us at a separator (',' or ':'),
	// consume it so a value can legally start here.
	if err := dec.tokenPrepareForDecode(); err != nil {
		return err
	}

	if !dec.tokenValueAllowed() {
		return &SyntaxError{msg: "not at beginning of value"}
	}

	// Read whole value into buffer.
	n, err := dec.readValue()
	if err != nil {
		return err
	}
	dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
	dec.scanp += n

	// Don't save err from unmarshal into dec.err:
	// the connection is still usable since we read a complete JSON
	// object from it before the error happened.
	err = dec.d.unmarshal(v)

	// fixup token streaming state
	dec.tokenValueEnd()

	return err
}
|
||||||
|
|
||||||
|
// Buffered returns a reader of the data remaining in the Decoder's
|
||||||
|
// buffer. The reader is valid until the next call to Decode.
|
||||||
|
func (dec *Decoder) Buffered() io.Reader {
|
||||||
|
return bytes.NewReader(dec.buf[dec.scanp:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
	dec.scan.reset()

	scanp := dec.scanp
	var err error
Input:
	for {
		// Look in the buffer for a new value.
		for i, c := range dec.buf[scanp:] {
			dec.scan.bytes++
			v := dec.scan.step(&dec.scan, c)
			if v == scanEnd {
				scanp += i
				break Input
			}
			// scanEnd is delayed one byte.
			// We might block trying to get that byte from src,
			// so instead invent a space byte.
			if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
				scanp += i + 1
				break Input
			}
			if v == scanError {
				dec.err = dec.scan.err
				return 0, dec.scan.err
			}
		}
		// Whole buffer scanned without completing a value.
		scanp = len(dec.buf)

		// Did the last read have an error?
		// Delayed until now to allow buffer scan.
		if err != nil {
			if err == io.EOF {
				// EOF exactly at a value boundary is fine.
				if dec.scan.step(&dec.scan, ' ') == scanEnd {
					break Input
				}
				// EOF mid-value (but not in pure whitespace) is premature.
				if nonSpace(dec.buf) {
					err = io.ErrUnexpectedEOF
				}
			}
			dec.err = err
			return 0, err
		}

		// Need more input. refill may slide the buffer, so recompute
		// scanp relative to the (possibly moved) unread region.
		n := scanp - dec.scanp
		err = dec.refill()
		scanp = dec.scanp + n
	}
	return scanp - dec.scanp, nil
}
|
||||||
|
|
||||||
|
// refill reads more data from dec.r into dec.buf, first discarding
// already-consumed input and growing the buffer if needed.
// A read error is returned rather than stored, so the caller can
// finish scanning buffered data before acting on it.
func (dec *Decoder) refill() error {
	// Make room to read more into the buffer.
	// First slide down data already consumed.
	if dec.scanp > 0 {
		n := copy(dec.buf, dec.buf[dec.scanp:])
		dec.buf = dec.buf[:n]
		dec.scanp = 0
	}

	// Grow buffer if not large enough.
	const minRead = 512
	if cap(dec.buf)-len(dec.buf) < minRead {
		newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
		copy(newBuf, dec.buf)
		dec.buf = newBuf
	}

	// Read. Delay error for next iteration (after scan).
	n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
	dec.buf = dec.buf[0 : len(dec.buf)+n]

	return err
}
|
||||||
|
|
||||||
|
func nonSpace(b []byte) bool {
|
||||||
|
for _, c := range b {
|
||||||
|
if !isSpace(c) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Encoder writes JSON values to an output stream.
type Encoder struct {
	w          io.Writer // destination for encoded output
	err        error     // sticky write error; once set, all calls fail
	escapeHTML bool      // escape <, >, & in strings (default true)

	// Set by Indent; nil means compact output.
	indentBuf    *bytes.Buffer
	indentPrefix string
	indentValue  string

	ext Extension // extension options applied during encoding
}
|
||||||
|
|
||||||
|
// NewEncoder returns a new encoder that writes to w.
// HTML escaping is on by default; see DisableHTMLEscaping.
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{w: w, escapeHTML: true}
}
|
||||||
|
|
||||||
|
// Encode writes the JSON encoding of v to the stream,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v interface{}) error {
	// A previous write error is sticky.
	if enc.err != nil {
		return enc.err
	}
	e := newEncodeState()
	e.ext = enc.ext
	err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
	if err != nil {
		return err
	}

	// Terminate each value with a newline.
	// This makes the output look a little nicer
	// when debugging, and some kind of space
	// is required if the encoded value was a number,
	// so that the reader knows there aren't more
	// digits coming.
	e.WriteByte('\n')

	b := e.Bytes()
	if enc.indentBuf != nil {
		// Indent was requested; re-format through the shared buffer.
		enc.indentBuf.Reset()
		err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue)
		if err != nil {
			return err
		}
		b = enc.indentBuf.Bytes()
	}
	if _, err = enc.w.Write(b); err != nil {
		// Write errors are sticky: the stream is no longer usable.
		enc.err = err
	}
	encodeStatePool.Put(e)
	return err
}
|
||||||
|
|
||||||
|
// Indent sets the encoder to format each encoded value with Indent.
// prefix and indent have the same meaning as in the package-level
// Indent function.
func (enc *Encoder) Indent(prefix, indent string) {
	// A non-nil indentBuf is what switches Encode into indent mode.
	enc.indentBuf = new(bytes.Buffer)
	enc.indentPrefix = prefix
	enc.indentValue = indent
}
|
||||||
|
|
||||||
|
// DisableHTMLEscaping causes the encoder not to escape angle brackets
// ("<" and ">") or ampersands ("&") in JSON strings.
func (enc *Encoder) DisableHTMLEscaping() {
	enc.escapeHTML = false
}
|
||||||
|
|
||||||
|
// RawMessage is a raw encoded JSON value.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte

// MarshalJSON returns *m as the JSON encoding of m.
// A nil message encodes as the JSON null literal.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
	if m == nil || *m == nil {
		// Bug fix: previously a nil RawMessage was returned as-is,
		// producing empty (invalid) JSON output. Encode it as null,
		// matching the encoding/json fix for golang.org/issue/14493.
		return []byte("null"), nil
	}
	return *m, nil
}

// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
	if m == nil {
		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
	}
	*m = append((*m)[0:0], data...)
	return nil
}
|
||||||
|
|
||||||
|
var _ Marshaler = (*RawMessage)(nil)
|
||||||
|
var _ Unmarshaler = (*RawMessage)(nil)
|
||||||
|
|
||||||
|
// A Token holds a value of one of these types:
//
//	Delim, for the four JSON delimiters [ ] { }
//	bool, for JSON booleans
//	float64, for JSON numbers
//	Number, for JSON numbers
//	string, for JSON string literals
//	nil, for JSON null
//
type Token interface{}
|
||||||
|
|
||||||
|
// Token-streaming states for Decoder.tokenState, tracking where in
// the JSON grammar the next byte is expected.
const (
	tokenTopValue   = iota // at top level, expecting a value
	tokenArrayStart        // just after '[', expecting value or ']'
	tokenArrayValue        // inside an array, expecting a value
	tokenArrayComma        // after an array element, expecting ',' or ']'
	tokenObjectStart       // just after '{', expecting key or '}'
	tokenObjectKey         // inside an object, expecting a key
	tokenObjectColon       // after a key, expecting ':'
	tokenObjectValue       // after ':', expecting a value
	tokenObjectComma       // after a value, expecting ',' or '}'
)
|
||||||
|
|
||||||
|
// tokenPrepareForDecode advances the token state from a separator
// state (after an array element or object key) to a value state by
// consuming the pending ',' or ':'. It is a no-op in all other states.
func (dec *Decoder) tokenPrepareForDecode() error {
	// Note: Not calling peek before switch, to avoid
	// putting peek into the standard Decode path.
	// peek is only called when using the Token API.
	switch dec.tokenState {
	case tokenArrayComma:
		c, err := dec.peek()
		if err != nil {
			return err
		}
		if c != ',' {
			return &SyntaxError{"expected comma after array element", 0}
		}
		dec.scanp++
		dec.tokenState = tokenArrayValue
	case tokenObjectColon:
		c, err := dec.peek()
		if err != nil {
			return err
		}
		if c != ':' {
			return &SyntaxError{"expected colon after object key", 0}
		}
		dec.scanp++
		dec.tokenState = tokenObjectValue
	}
	return nil
}
|
||||||
|
|
||||||
|
func (dec *Decoder) tokenValueAllowed() bool {
|
||||||
|
switch dec.tokenState {
|
||||||
|
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// tokenValueEnd advances the token state after a complete value has
// been consumed: the next expected byte is the container's separator.
func (dec *Decoder) tokenValueEnd() {
	switch dec.tokenState {
	case tokenArrayStart, tokenArrayValue:
		dec.tokenState = tokenArrayComma
	case tokenObjectValue:
		dec.tokenState = tokenObjectComma
	}
}
|
||||||
|
|
||||||
|
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune

// String returns the delimiter as a one-character string.
func (d Delim) String() string {
	return string(d)
}
|
||||||
|
|
||||||
|
// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
	for {
		c, err := dec.peek()
		if err != nil {
			return nil, err
		}
		switch c {
		case '[':
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			// Push the enclosing state; restore it at ']'.
			dec.scanp++
			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
			dec.tokenState = tokenArrayStart
			return Delim('['), nil

		case ']':
			if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
				return dec.tokenError(c)
			}
			// Pop the enclosing state and mark the array as a
			// completed value within it.
			dec.scanp++
			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
			dec.tokenValueEnd()
			return Delim(']'), nil

		case '{':
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			dec.scanp++
			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
			dec.tokenState = tokenObjectStart
			return Delim('{'), nil

		case '}':
			if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
				return dec.tokenError(c)
			}
			dec.scanp++
			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
			dec.tokenValueEnd()
			return Delim('}'), nil

		case ':':
			// Colons are consumed silently, not returned as tokens.
			if dec.tokenState != tokenObjectColon {
				return dec.tokenError(c)
			}
			dec.scanp++
			dec.tokenState = tokenObjectValue
			continue

		case ',':
			// Commas are consumed silently, not returned as tokens.
			if dec.tokenState == tokenArrayComma {
				dec.scanp++
				dec.tokenState = tokenArrayValue
				continue
			}
			if dec.tokenState == tokenObjectComma {
				dec.scanp++
				dec.tokenState = tokenObjectKey
				continue
			}
			return dec.tokenError(c)

		case '"':
			if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
				// Decode the object key as a plain string, temporarily
				// pretending to be at top level so Decode permits it.
				var x string
				old := dec.tokenState
				dec.tokenState = tokenTopValue
				err := dec.Decode(&x)
				dec.tokenState = old
				if err != nil {
					clearOffset(err)
					return nil, err
				}
				dec.tokenState = tokenObjectColon
				return x, nil
			}
			// A string in value position: decode as a normal value.
			fallthrough

		default:
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			var x interface{}
			if err := dec.Decode(&x); err != nil {
				clearOffset(err)
				return nil, err
			}
			return x, nil
		}
	}
}
|
||||||
|
|
||||||
|
func clearOffset(err error) {
|
||||||
|
if s, ok := err.(*SyntaxError); ok {
|
||||||
|
s.Offset = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dec *Decoder) tokenError(c byte) (Token, error) {
|
||||||
|
var context string
|
||||||
|
switch dec.tokenState {
|
||||||
|
case tokenTopValue:
|
||||||
|
context = " looking for beginning of value"
|
||||||
|
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
||||||
|
context = " looking for beginning of value"
|
||||||
|
case tokenArrayComma:
|
||||||
|
context = " after array element"
|
||||||
|
case tokenObjectKey:
|
||||||
|
context = " looking for beginning of object key string"
|
||||||
|
case tokenObjectColon:
|
||||||
|
context = " after object key"
|
||||||
|
case tokenObjectComma:
|
||||||
|
context = " after object key:value pair"
|
||||||
|
}
|
||||||
|
return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
|
||||||
|
}
|
||||||
|
|
||||||
|
// More reports whether there is another element in the
|
||||||
|
// current array or object being parsed.
|
||||||
|
func (dec *Decoder) More() bool {
|
||||||
|
c, err := dec.peek()
|
||||||
|
return err == nil && c != ']' && c != '}'
|
||||||
|
}
|
||||||
|
|
||||||
|
// peek returns the next non-whitespace byte without consuming it,
// refilling the buffer from the underlying reader as needed.
// dec.scanp is advanced past any skipped whitespace.
func (dec *Decoder) peek() (byte, error) {
	var err error
	for {
		for i := dec.scanp; i < len(dec.buf); i++ {
			c := dec.buf[i]
			if isSpace(c) {
				continue
			}
			dec.scanp = i
			return c, nil
		}
		// buffer has been scanned, now report any error
		if err != nil {
			return 0, err
		}
		// Read more input; a returned error is surfaced only after
		// the newly buffered bytes have been scanned above.
		err = dec.refill()
	}
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
TODO
|
||||||
|
|
||||||
|
// EncodeToken writes the given JSON token to the stream.
|
||||||
|
// It returns an error if the delimiters [ ] { } are not properly used.
|
||||||
|
//
|
||||||
|
// EncodeToken does not call Flush, because usually it is part of
|
||||||
|
// a larger operation such as Encode, and those will call Flush when finished.
|
||||||
|
// Callers that create an Encoder and then invoke EncodeToken directly,
|
||||||
|
// without using Encode, need to call Flush when finished to ensure that
|
||||||
|
// the JSON is written to the underlying writer.
|
||||||
|
func (e *Encoder) EncodeToken(t Token) error {
|
||||||
|
...
|
||||||
|
}
|
||||||
|
|
||||||
|
*/
|
|
@ -0,0 +1,44 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package json
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// tagOptions holds the comma-separated option list that follows the
// name in a struct field's "json" tag. It does not include the
// leading comma; it is empty when the tag has no options.
type tagOptions string

// parseTag splits a struct field's json tag into the field name and
// its trailing, comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	comma := strings.Index(tag, ",")
	if comma == -1 {
		return tag, tagOptions("")
	}
	return tag[:comma], tagOptions(tag[comma+1:])
}

// Contains reports whether the option list includes optionName as one
// of its complete, comma-delimited entries.
func (o tagOptions) Contains(optionName string) bool {
	rest := string(o)
	for len(rest) > 0 {
		option := rest
		if comma := strings.Index(rest, ","); comma >= 0 {
			option, rest = rest[:comma], rest[comma+1:]
		} else {
			rest = ""
		}
		if option == optionName {
			return true
		}
	}
	return false
}
|
|
@ -0,0 +1,37 @@
|
||||||
|
package(default_visibility = ["//visibility:public"])
|
||||||
|
|
||||||
|
licenses(["notice"])
|
||||||
|
|
||||||
|
load(
|
||||||
|
"@io_bazel_rules_go//go:def.bzl",
|
||||||
|
"go_library",
|
||||||
|
"go_test",
|
||||||
|
)
|
||||||
|
|
||||||
|
go_test(
|
||||||
|
name = "go_default_test",
|
||||||
|
srcs = ["errors_test.go"],
|
||||||
|
library = ":go_default_library",
|
||||||
|
tags = ["automanaged"],
|
||||||
|
deps = [
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = [
|
||||||
|
"doc.go",
|
||||||
|
"errors.go",
|
||||||
|
],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
deps = [
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
|
||||||
|
],
|
||||||
|
)
|
|
@ -0,0 +1,26 @@
|
||||||
|
reviewers:
|
||||||
|
- thockin
|
||||||
|
- lavalamp
|
||||||
|
- smarterclayton
|
||||||
|
- wojtek-t
|
||||||
|
- deads2k
|
||||||
|
- brendandburns
|
||||||
|
- derekwaynecarr
|
||||||
|
- caesarxuchao
|
||||||
|
- mikedanese
|
||||||
|
- liggitt
|
||||||
|
- nikhiljindal
|
||||||
|
- gmarek
|
||||||
|
- erictune
|
||||||
|
- saad-ali
|
||||||
|
- janetkuo
|
||||||
|
- timstclair
|
||||||
|
- eparis
|
||||||
|
- timothysc
|
||||||
|
- dims
|
||||||
|
- hongchaodeng
|
||||||
|
- krousey
|
||||||
|
- satnam6502
|
||||||
|
- cjcullen
|
||||||
|
- david-mcmahon
|
||||||
|
- goltermann
|
|
@ -0,0 +1,18 @@
|
||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package errors provides detailed error types for api field validation.
|
||||||
|
package errors // import "k8s.io/apimachinery/pkg/api/errors"
|
|
@ -0,0 +1,478 @@
|
||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package errors
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HTTP Status codes not in the golang http package.
|
||||||
|
const (
|
||||||
|
StatusUnprocessableEntity = 422
|
||||||
|
StatusTooManyRequests = 429
|
||||||
|
// StatusServerTimeout is an indication that a transient server error has
|
||||||
|
// occurred and the client *should* retry, with an optional Retry-After
|
||||||
|
// header to specify the back off window.
|
||||||
|
StatusServerTimeout = 504
|
||||||
|
)
|
||||||
|
|
||||||
|
// StatusError is an error intended for consumption by a REST API server; it can also be
|
||||||
|
// reconstructed by clients from a REST response. Public to allow easy type switches.
|
||||||
|
type StatusError struct {
|
||||||
|
ErrStatus metav1.Status
|
||||||
|
}
|
||||||
|
|
||||||
|
// APIStatus is exposed by errors that can be converted to an api.Status object
|
||||||
|
// for finer grained details.
|
||||||
|
type APIStatus interface {
|
||||||
|
Status() metav1.Status
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ error = &StatusError{}
|
||||||
|
|
||||||
|
// Error implements the Error interface.
|
||||||
|
func (e *StatusError) Error() string {
|
||||||
|
return e.ErrStatus.Message
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status allows access to e's status without having to know the detailed workings
|
||||||
|
// of StatusError.
|
||||||
|
func (e *StatusError) Status() metav1.Status {
|
||||||
|
return e.ErrStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
// DebugError reports extended info about the error to debug output.
|
||||||
|
func (e *StatusError) DebugError() (string, []interface{}) {
|
||||||
|
if out, err := json.MarshalIndent(e.ErrStatus, "", " "); err == nil {
|
||||||
|
return "server response object: %s", []interface{}{string(out)}
|
||||||
|
}
|
||||||
|
return "server response object: %#v", []interface{}{e.ErrStatus}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnexpectedObjectError can be returned by FromObject if it's passed a non-status object.
|
||||||
|
type UnexpectedObjectError struct {
|
||||||
|
Object runtime.Object
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns an error message describing 'u'.
|
||||||
|
func (u *UnexpectedObjectError) Error() string {
|
||||||
|
return fmt.Sprintf("unexpected object: %v", u.Object)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromObject generates an StatusError from an metav1.Status, if that is the type of obj; otherwise,
|
||||||
|
// returns an UnexpecteObjectError.
|
||||||
|
func FromObject(obj runtime.Object) error {
|
||||||
|
switch t := obj.(type) {
|
||||||
|
case *metav1.Status:
|
||||||
|
return &StatusError{*t}
|
||||||
|
}
|
||||||
|
return &UnexpectedObjectError{obj}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNotFound returns a new error which indicates that the resource of the kind and the name was not found.
|
||||||
|
func NewNotFound(qualifiedResource schema.GroupResource, name string) *StatusError {
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: http.StatusNotFound,
|
||||||
|
Reason: metav1.StatusReasonNotFound,
|
||||||
|
Details: &metav1.StatusDetails{
|
||||||
|
Group: qualifiedResource.Group,
|
||||||
|
Kind: qualifiedResource.Resource,
|
||||||
|
Name: name,
|
||||||
|
},
|
||||||
|
Message: fmt.Sprintf("%s %q not found", qualifiedResource.String(), name),
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAlreadyExists returns an error indicating the item requested exists by that identifier.
|
||||||
|
func NewAlreadyExists(qualifiedResource schema.GroupResource, name string) *StatusError {
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: http.StatusConflict,
|
||||||
|
Reason: metav1.StatusReasonAlreadyExists,
|
||||||
|
Details: &metav1.StatusDetails{
|
||||||
|
Group: qualifiedResource.Group,
|
||||||
|
Kind: qualifiedResource.Resource,
|
||||||
|
Name: name,
|
||||||
|
},
|
||||||
|
Message: fmt.Sprintf("%s %q already exists", qualifiedResource.String(), name),
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewUnauthorized returns an error indicating the client is not authorized to perform the requested
|
||||||
|
// action.
|
||||||
|
func NewUnauthorized(reason string) *StatusError {
|
||||||
|
message := reason
|
||||||
|
if len(message) == 0 {
|
||||||
|
message = "not authorized"
|
||||||
|
}
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: http.StatusUnauthorized,
|
||||||
|
Reason: metav1.StatusReasonUnauthorized,
|
||||||
|
Message: message,
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewForbidden returns an error indicating the requested action was forbidden
|
||||||
|
func NewForbidden(qualifiedResource schema.GroupResource, name string, err error) *StatusError {
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: http.StatusForbidden,
|
||||||
|
Reason: metav1.StatusReasonForbidden,
|
||||||
|
Details: &metav1.StatusDetails{
|
||||||
|
Group: qualifiedResource.Group,
|
||||||
|
Kind: qualifiedResource.Resource,
|
||||||
|
Name: name,
|
||||||
|
},
|
||||||
|
Message: fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err),
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConflict returns an error indicating the item can't be updated as provided.
|
||||||
|
func NewConflict(qualifiedResource schema.GroupResource, name string, err error) *StatusError {
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: http.StatusConflict,
|
||||||
|
Reason: metav1.StatusReasonConflict,
|
||||||
|
Details: &metav1.StatusDetails{
|
||||||
|
Group: qualifiedResource.Group,
|
||||||
|
Kind: qualifiedResource.Resource,
|
||||||
|
Name: name,
|
||||||
|
},
|
||||||
|
Message: fmt.Sprintf("Operation cannot be fulfilled on %s %q: %v", qualifiedResource.String(), name, err),
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGone returns an error indicating the item no longer available at the server and no forwarding address is known.
|
||||||
|
func NewGone(message string) *StatusError {
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: http.StatusGone,
|
||||||
|
Reason: metav1.StatusReasonGone,
|
||||||
|
Message: message,
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewInvalid returns an error indicating the item is invalid and cannot be processed.
|
||||||
|
func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorList) *StatusError {
|
||||||
|
causes := make([]metav1.StatusCause, 0, len(errs))
|
||||||
|
for i := range errs {
|
||||||
|
err := errs[i]
|
||||||
|
causes = append(causes, metav1.StatusCause{
|
||||||
|
Type: metav1.CauseType(err.Type),
|
||||||
|
Message: err.ErrorBody(),
|
||||||
|
Field: err.Field,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: StatusUnprocessableEntity, // RFC 4918: StatusUnprocessableEntity
|
||||||
|
Reason: metav1.StatusReasonInvalid,
|
||||||
|
Details: &metav1.StatusDetails{
|
||||||
|
Group: qualifiedKind.Group,
|
||||||
|
Kind: qualifiedKind.Kind,
|
||||||
|
Name: name,
|
||||||
|
Causes: causes,
|
||||||
|
},
|
||||||
|
Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()),
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBadRequest creates an error that indicates that the request is invalid and can not be processed.
|
||||||
|
func NewBadRequest(reason string) *StatusError {
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: http.StatusBadRequest,
|
||||||
|
Reason: metav1.StatusReasonBadRequest,
|
||||||
|
Message: reason,
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewServiceUnavailable creates an error that indicates that the requested service is unavailable.
|
||||||
|
func NewServiceUnavailable(reason string) *StatusError {
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: http.StatusServiceUnavailable,
|
||||||
|
Reason: metav1.StatusReasonServiceUnavailable,
|
||||||
|
Message: reason,
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMethodNotSupported returns an error indicating the requested action is not supported on this kind.
|
||||||
|
func NewMethodNotSupported(qualifiedResource schema.GroupResource, action string) *StatusError {
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: http.StatusMethodNotAllowed,
|
||||||
|
Reason: metav1.StatusReasonMethodNotAllowed,
|
||||||
|
Details: &metav1.StatusDetails{
|
||||||
|
Group: qualifiedResource.Group,
|
||||||
|
Kind: qualifiedResource.Resource,
|
||||||
|
},
|
||||||
|
Message: fmt.Sprintf("%s is not supported on resources of kind %q", action, qualifiedResource.String()),
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewServerTimeout returns an error indicating the requested action could not be completed due to a
|
||||||
|
// transient error, and the client should try again.
|
||||||
|
func NewServerTimeout(qualifiedResource schema.GroupResource, operation string, retryAfterSeconds int) *StatusError {
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: http.StatusInternalServerError,
|
||||||
|
Reason: metav1.StatusReasonServerTimeout,
|
||||||
|
Details: &metav1.StatusDetails{
|
||||||
|
Group: qualifiedResource.Group,
|
||||||
|
Kind: qualifiedResource.Resource,
|
||||||
|
Name: operation,
|
||||||
|
RetryAfterSeconds: int32(retryAfterSeconds),
|
||||||
|
},
|
||||||
|
Message: fmt.Sprintf("The %s operation against %s could not be completed at this time, please try again.", operation, qualifiedResource.String()),
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewServerTimeoutForKind should not exist. Server timeouts happen when accessing resources, the Kind is just what we
|
||||||
|
// happened to be looking at when the request failed. This delegates to keep code sane, but we should work towards removing this.
|
||||||
|
func NewServerTimeoutForKind(qualifiedKind schema.GroupKind, operation string, retryAfterSeconds int) *StatusError {
|
||||||
|
return NewServerTimeout(schema.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewInternalError returns an error indicating the item is invalid and cannot be processed.
|
||||||
|
func NewInternalError(err error) *StatusError {
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: http.StatusInternalServerError,
|
||||||
|
Reason: metav1.StatusReasonInternalError,
|
||||||
|
Details: &metav1.StatusDetails{
|
||||||
|
Causes: []metav1.StatusCause{{Message: err.Error()}},
|
||||||
|
},
|
||||||
|
Message: fmt.Sprintf("Internal error occurred: %v", err),
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTimeoutError returns an error indicating that a timeout occurred before the request
|
||||||
|
// could be completed. Clients may retry, but the operation may still complete.
|
||||||
|
func NewTimeoutError(message string, retryAfterSeconds int) *StatusError {
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: StatusServerTimeout,
|
||||||
|
Reason: metav1.StatusReasonTimeout,
|
||||||
|
Message: fmt.Sprintf("Timeout: %s", message),
|
||||||
|
Details: &metav1.StatusDetails{
|
||||||
|
RetryAfterSeconds: int32(retryAfterSeconds),
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form.
|
||||||
|
func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError {
|
||||||
|
reason := metav1.StatusReasonUnknown
|
||||||
|
message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code)
|
||||||
|
switch code {
|
||||||
|
case http.StatusConflict:
|
||||||
|
if verb == "POST" {
|
||||||
|
reason = metav1.StatusReasonAlreadyExists
|
||||||
|
} else {
|
||||||
|
reason = metav1.StatusReasonConflict
|
||||||
|
}
|
||||||
|
message = "the server reported a conflict"
|
||||||
|
case http.StatusNotFound:
|
||||||
|
reason = metav1.StatusReasonNotFound
|
||||||
|
message = "the server could not find the requested resource"
|
||||||
|
case http.StatusBadRequest:
|
||||||
|
reason = metav1.StatusReasonBadRequest
|
||||||
|
message = "the server rejected our request for an unknown reason"
|
||||||
|
case http.StatusUnauthorized:
|
||||||
|
reason = metav1.StatusReasonUnauthorized
|
||||||
|
message = "the server has asked for the client to provide credentials"
|
||||||
|
case http.StatusForbidden:
|
||||||
|
reason = metav1.StatusReasonForbidden
|
||||||
|
// the server message has details about who is trying to perform what action. Keep its message.
|
||||||
|
message = serverMessage
|
||||||
|
case http.StatusMethodNotAllowed:
|
||||||
|
reason = metav1.StatusReasonMethodNotAllowed
|
||||||
|
message = "the server does not allow this method on the requested resource"
|
||||||
|
case StatusUnprocessableEntity:
|
||||||
|
reason = metav1.StatusReasonInvalid
|
||||||
|
message = "the server rejected our request due to an error in our request"
|
||||||
|
case StatusServerTimeout:
|
||||||
|
reason = metav1.StatusReasonServerTimeout
|
||||||
|
message = "the server cannot complete the requested operation at this time, try again later"
|
||||||
|
case StatusTooManyRequests:
|
||||||
|
reason = metav1.StatusReasonTimeout
|
||||||
|
message = "the server has received too many requests and has asked us to try again later"
|
||||||
|
default:
|
||||||
|
if code >= 500 {
|
||||||
|
reason = metav1.StatusReasonInternalError
|
||||||
|
message = fmt.Sprintf("an error on the server (%q) has prevented the request from succeeding", serverMessage)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case !qualifiedResource.Empty() && len(name) > 0:
|
||||||
|
message = fmt.Sprintf("%s (%s %s %s)", message, strings.ToLower(verb), qualifiedResource.String(), name)
|
||||||
|
case !qualifiedResource.Empty():
|
||||||
|
message = fmt.Sprintf("%s (%s %s)", message, strings.ToLower(verb), qualifiedResource.String())
|
||||||
|
}
|
||||||
|
var causes []metav1.StatusCause
|
||||||
|
if isUnexpectedResponse {
|
||||||
|
causes = []metav1.StatusCause{
|
||||||
|
{
|
||||||
|
Type: metav1.CauseTypeUnexpectedServerResponse,
|
||||||
|
Message: serverMessage,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
causes = nil
|
||||||
|
}
|
||||||
|
return &StatusError{metav1.Status{
|
||||||
|
Status: metav1.StatusFailure,
|
||||||
|
Code: int32(code),
|
||||||
|
Reason: reason,
|
||||||
|
Details: &metav1.StatusDetails{
|
||||||
|
Group: qualifiedResource.Group,
|
||||||
|
Kind: qualifiedResource.Resource,
|
||||||
|
Name: name,
|
||||||
|
|
||||||
|
Causes: causes,
|
||||||
|
RetryAfterSeconds: int32(retryAfterSeconds),
|
||||||
|
},
|
||||||
|
Message: message,
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNotFound returns true if the specified error was created by NewNotFound.
|
||||||
|
func IsNotFound(err error) bool {
|
||||||
|
return reasonForError(err) == metav1.StatusReasonNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists.
|
||||||
|
func IsAlreadyExists(err error) bool {
|
||||||
|
return reasonForError(err) == metav1.StatusReasonAlreadyExists
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsConflict determines if the err is an error which indicates the provided update conflicts.
|
||||||
|
func IsConflict(err error) bool {
|
||||||
|
return reasonForError(err) == metav1.StatusReasonConflict
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsInvalid determines if the err is an error which indicates the provided resource is not valid.
|
||||||
|
func IsInvalid(err error) bool {
|
||||||
|
return reasonForError(err) == metav1.StatusReasonInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsMethodNotSupported determines if the err is an error which indicates the provided action could not
|
||||||
|
// be performed because it is not supported by the server.
|
||||||
|
func IsMethodNotSupported(err error) bool {
|
||||||
|
return reasonForError(err) == metav1.StatusReasonMethodNotAllowed
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsBadRequest determines if err is an error which indicates that the request is invalid.
|
||||||
|
func IsBadRequest(err error) bool {
|
||||||
|
return reasonForError(err) == metav1.StatusReasonBadRequest
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and
|
||||||
|
// requires authentication by the user.
|
||||||
|
func IsUnauthorized(err error) bool {
|
||||||
|
return reasonForError(err) == metav1.StatusReasonUnauthorized
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot
|
||||||
|
// be completed as requested.
|
||||||
|
func IsForbidden(err error) bool {
|
||||||
|
return reasonForError(err) == metav1.StatusReasonForbidden
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsTimeout determines if err is an error which indicates that request times out due to long
|
||||||
|
// processing.
|
||||||
|
func IsTimeout(err error) bool {
|
||||||
|
return reasonForError(err) == metav1.StatusReasonTimeout
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsServerTimeout determines if err is an error which indicates that the request needs to be retried
|
||||||
|
// by the client.
|
||||||
|
func IsServerTimeout(err error) bool {
|
||||||
|
return reasonForError(err) == metav1.StatusReasonServerTimeout
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsInternalError determines if err is an error which indicates an internal server error.
|
||||||
|
func IsInternalError(err error) bool {
|
||||||
|
return reasonForError(err) == metav1.StatusReasonInternalError
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsTooManyRequests determines if err is an error which indicates that there are too many requests
|
||||||
|
// that the server cannot handle.
|
||||||
|
// TODO: update IsTooManyRequests() when the TooManyRequests(429) error returned from the API server has a non-empty Reason field
|
||||||
|
func IsTooManyRequests(err error) bool {
|
||||||
|
switch t := err.(type) {
|
||||||
|
case APIStatus:
|
||||||
|
return t.Status().Code == StatusTooManyRequests
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsUnexpectedServerError returns true if the server response was not in the expected API format,
|
||||||
|
// and may be the result of another HTTP actor.
|
||||||
|
func IsUnexpectedServerError(err error) bool {
|
||||||
|
switch t := err.(type) {
|
||||||
|
case APIStatus:
|
||||||
|
if d := t.Status().Details; d != nil {
|
||||||
|
for _, cause := range d.Causes {
|
||||||
|
if cause.Type == metav1.CauseTypeUnexpectedServerResponse {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsUnexpectedObjectError determines if err is due to an unexpected object from the master.
|
||||||
|
func IsUnexpectedObjectError(err error) bool {
|
||||||
|
_, ok := err.(*UnexpectedObjectError)
|
||||||
|
return err != nil && ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// SuggestsClientDelay returns true if this error suggests a client delay as well as the
|
||||||
|
// suggested seconds to wait, or false if the error does not imply a wait.
|
||||||
|
func SuggestsClientDelay(err error) (int, bool) {
|
||||||
|
switch t := err.(type) {
|
||||||
|
case APIStatus:
|
||||||
|
if t.Status().Details != nil {
|
||||||
|
switch t.Status().Reason {
|
||||||
|
case metav1.StatusReasonServerTimeout, metav1.StatusReasonTimeout:
|
||||||
|
return int(t.Status().Details.RetryAfterSeconds), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func reasonForError(err error) metav1.StatusReason {
|
||||||
|
switch t := err.(type) {
|
||||||
|
case APIStatus:
|
||||||
|
return t.Status().Reason
|
||||||
|
}
|
||||||
|
return metav1.StatusReasonUnknown
|
||||||
|
}
|
137
vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
generated
vendored
Normal file
137
vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
generated
vendored
Normal file
|
@ -0,0 +1,137 @@
|
||||||
|
/*
|
||||||
|
Copyright 2015 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package streaming implements encoder and decoder for streams
|
||||||
|
// of runtime.Objects over io.Writer/Readers.
|
||||||
|
package streaming
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Encoder is a runtime.Encoder on a stream.
|
||||||
|
type Encoder interface {
|
||||||
|
// Encode will write the provided object to the stream or return an error. It obeys the same
|
||||||
|
// contract as runtime.VersionedEncoder.
|
||||||
|
Encode(obj runtime.Object) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decoder is a runtime.Decoder from a stream.
|
||||||
|
type Decoder interface {
|
||||||
|
// Decode will return io.EOF when no more objects are available.
|
||||||
|
Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error)
|
||||||
|
// Close closes the underlying stream.
|
||||||
|
Close() error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serializer is a factory for creating encoders and decoders that work over streams.
|
||||||
|
type Serializer interface {
|
||||||
|
NewEncoder(w io.Writer) Encoder
|
||||||
|
NewDecoder(r io.ReadCloser) Decoder
|
||||||
|
}
|
||||||
|
|
||||||
|
type decoder struct {
|
||||||
|
reader io.ReadCloser
|
||||||
|
decoder runtime.Decoder
|
||||||
|
buf []byte
|
||||||
|
maxBytes int
|
||||||
|
resetRead bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDecoder creates a streaming decoder that reads object chunks from r and decodes them with d.
|
||||||
|
// The reader is expected to return ErrShortRead if the provided buffer is not large enough to read
|
||||||
|
// an entire object.
|
||||||
|
func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder {
|
||||||
|
return &decoder{
|
||||||
|
reader: r,
|
||||||
|
decoder: d,
|
||||||
|
buf: make([]byte, 1024),
|
||||||
|
maxBytes: 1024 * 1024,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size")
|
||||||
|
|
||||||
|
// Decode reads the next object from the stream and decodes it.
// It delegates the actual deserialization of the buffered frame to
// the nested runtime.Decoder, passing through defaults and into.
func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
	// base tracks how many bytes of d.buf already hold frame data.
	base := 0
	for {
		n, err := d.reader.Read(d.buf[base:])
		if err == io.ErrShortBuffer {
			// The framing reader signals ErrShortBuffer when the supplied
			// slice could not hold the whole frame; n bytes were still read.
			if n == 0 {
				return nil, nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf))
			}
			if d.resetRead {
				// We are discarding an oversized frame: keep reading (and
				// throwing away) chunks until ErrShortBuffer stops.
				continue
			}
			// double the buffer size up to maxBytes
			if len(d.buf) < d.maxBytes {
				base += n
				d.buf = append(d.buf, make([]byte, len(d.buf))...)
				continue
			}
			// must read the rest of the frame (until we stop getting ErrShortBuffer)
			d.resetRead = true
			base = 0
			return nil, nil, ErrObjectTooLarge
		}
		if err != nil {
			return nil, nil, err
		}
		if d.resetRead {
			// now that we have drained the large read, continue
			d.resetRead = false
			continue
		}
		// A complete frame is now buffered in d.buf[:base+n].
		base += n
		break
	}
	return d.decoder.Decode(d.buf[:base], defaults, into)
}
|
||||||
|
|
||||||
|
func (d *decoder) Close() error {
|
||||||
|
return d.reader.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
type encoder struct {
|
||||||
|
writer io.Writer
|
||||||
|
encoder runtime.Encoder
|
||||||
|
buf *bytes.Buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEncoder returns a new streaming encoder.
|
||||||
|
func NewEncoder(w io.Writer, e runtime.Encoder) Encoder {
|
||||||
|
return &encoder{
|
||||||
|
writer: w,
|
||||||
|
encoder: e,
|
||||||
|
buf: &bytes.Buffer{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode writes the provided object to the nested writer.
|
||||||
|
func (e *encoder) Encode(obj runtime.Object) error {
|
||||||
|
if err := e.encoder.Encode(obj, e.buf); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err := e.writer.Write(e.buf.Bytes())
|
||||||
|
e.buf.Reset()
|
||||||
|
return err
|
||||||
|
}
|
|
@ -0,0 +1,22 @@
|
||||||
|
package(default_visibility = ["//visibility:public"])
|
||||||
|
|
||||||
|
licenses(["notice"])
|
||||||
|
|
||||||
|
load(
|
||||||
|
"@io_bazel_rules_go//go:def.bzl",
|
||||||
|
"go_library",
|
||||||
|
"go_test",
|
||||||
|
)
|
||||||
|
|
||||||
|
go_test(
|
||||||
|
name = "go_default_test",
|
||||||
|
srcs = ["clock_test.go"],
|
||||||
|
library = ":go_default_library",
|
||||||
|
tags = ["automanaged"],
|
||||||
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = ["clock.go"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
)
|
|
@ -0,0 +1,327 @@
|
||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package clock
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Clock allows for injecting fake or real clocks into code that
|
||||||
|
// needs to do arbitrary things based on time.
|
||||||
|
type Clock interface {
|
||||||
|
Now() time.Time
|
||||||
|
Since(time.Time) time.Duration
|
||||||
|
After(d time.Duration) <-chan time.Time
|
||||||
|
NewTimer(d time.Duration) Timer
|
||||||
|
Sleep(d time.Duration)
|
||||||
|
Tick(d time.Duration) <-chan time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ = Clock(RealClock{})
|
||||||
|
_ = Clock(&FakeClock{})
|
||||||
|
_ = Clock(&IntervalClock{})
|
||||||
|
)
|
||||||
|
|
||||||
|
// RealClock really calls time.Now()
|
||||||
|
type RealClock struct{}
|
||||||
|
|
||||||
|
// Now returns the current time.
|
||||||
|
func (RealClock) Now() time.Time {
|
||||||
|
return time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Since returns time since the specified timestamp.
|
||||||
|
func (RealClock) Since(ts time.Time) time.Duration {
|
||||||
|
return time.Since(ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Same as time.After(d).
|
||||||
|
func (RealClock) After(d time.Duration) <-chan time.Time {
|
||||||
|
return time.After(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (RealClock) NewTimer(d time.Duration) Timer {
|
||||||
|
return &realTimer{
|
||||||
|
timer: time.NewTimer(d),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (RealClock) Tick(d time.Duration) <-chan time.Time {
|
||||||
|
return time.Tick(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (RealClock) Sleep(d time.Duration) {
|
||||||
|
time.Sleep(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FakeClock implements Clock, but returns an arbitrary time.
|
||||||
|
type FakeClock struct {
|
||||||
|
lock sync.RWMutex
|
||||||
|
time time.Time
|
||||||
|
|
||||||
|
// waiters are waiting for the fake time to pass their specified time
|
||||||
|
waiters []fakeClockWaiter
|
||||||
|
}
|
||||||
|
|
||||||
|
// fakeClockWaiter records one pending notification registered via After,
// Tick, or NewTimer on a FakeClock.
type fakeClockWaiter struct {
	targetTime    time.Time      // fires once the fake clock reaches this instant
	stepInterval  time.Duration  // > 0 for repeating (ticker-style) waiters
	skipIfBlocked bool           // drop the delivery instead of blocking (tickers)
	destChan      chan time.Time // channel the firing time is delivered on
	fired         bool           // set once the waiter has delivered a time
}
|
||||||
|
|
||||||
|
func NewFakeClock(t time.Time) *FakeClock {
|
||||||
|
return &FakeClock{
|
||||||
|
time: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now returns f's time.
|
||||||
|
func (f *FakeClock) Now() time.Time {
|
||||||
|
f.lock.RLock()
|
||||||
|
defer f.lock.RUnlock()
|
||||||
|
return f.time
|
||||||
|
}
|
||||||
|
|
||||||
|
// Since returns time since the time in f.
|
||||||
|
func (f *FakeClock) Since(ts time.Time) time.Duration {
|
||||||
|
f.lock.RLock()
|
||||||
|
defer f.lock.RUnlock()
|
||||||
|
return f.time.Sub(ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fake version of time.After(d).
|
||||||
|
func (f *FakeClock) After(d time.Duration) <-chan time.Time {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
stopTime := f.time.Add(d)
|
||||||
|
ch := make(chan time.Time, 1) // Don't block!
|
||||||
|
f.waiters = append(f.waiters, fakeClockWaiter{
|
||||||
|
targetTime: stopTime,
|
||||||
|
destChan: ch,
|
||||||
|
})
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fake version of time.NewTimer(d).
|
||||||
|
func (f *FakeClock) NewTimer(d time.Duration) Timer {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
stopTime := f.time.Add(d)
|
||||||
|
ch := make(chan time.Time, 1) // Don't block!
|
||||||
|
timer := &fakeTimer{
|
||||||
|
fakeClock: f,
|
||||||
|
waiter: fakeClockWaiter{
|
||||||
|
targetTime: stopTime,
|
||||||
|
destChan: ch,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
f.waiters = append(f.waiters, timer.waiter)
|
||||||
|
return timer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *FakeClock) Tick(d time.Duration) <-chan time.Time {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
tickTime := f.time.Add(d)
|
||||||
|
ch := make(chan time.Time, 1) // hold one tick
|
||||||
|
f.waiters = append(f.waiters, fakeClockWaiter{
|
||||||
|
targetTime: tickTime,
|
||||||
|
stepInterval: d,
|
||||||
|
skipIfBlocked: true,
|
||||||
|
destChan: ch,
|
||||||
|
})
|
||||||
|
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move clock by Duration, notify anyone that's called After, Tick, or NewTimer
|
||||||
|
func (f *FakeClock) Step(d time.Duration) {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
f.setTimeLocked(f.time.Add(d))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sets the time.
|
||||||
|
func (f *FakeClock) SetTime(t time.Time) {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
f.setTimeLocked(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Actually changes the time and checks any waiters. f must be write-locked.
//
// Every waiter whose targetTime is not after t is delivered the new time on
// its destChan. Ticker-style waiters (skipIfBlocked) drop the delivery when
// their channel is full; repeating waiters (stepInterval > 0) are re-armed at
// the next step past t and kept. One-shot waiters that fired are dropped.
func (f *FakeClock) setTimeLocked(t time.Time) {
	f.time = t
	// Rebuild the waiter list, keeping only waiters that remain pending.
	newWaiters := make([]fakeClockWaiter, 0, len(f.waiters))
	for i := range f.waiters {
		w := &f.waiters[i]
		if !w.targetTime.After(t) {

			if w.skipIfBlocked {
				// Never block on a ticker: if the buffered tick is still
				// undelivered, skip this one.
				select {
				case w.destChan <- t:
					w.fired = true
				default:
				}
			} else {
				// destChan is created with buffer size 1, so this send does
				// not block for a waiter that has not fired yet.
				w.destChan <- t
				w.fired = true
			}

			if w.stepInterval > 0 {
				// Advance a repeating waiter to the first firing time past t.
				// NOTE(review): assumes stepInterval is positive; a
				// non-positive interval would make this loop never terminate.
				for !w.targetTime.After(t) {
					w.targetTime = w.targetTime.Add(w.stepInterval)
				}
				newWaiters = append(newWaiters, *w)
			}

		} else {
			// Not yet due: keep unchanged.
			newWaiters = append(newWaiters, f.waiters[i])
		}
	}
	f.waiters = newWaiters
}
|
||||||
|
|
||||||
|
// Returns true if After has been called on f but not yet satisfied (so you can
|
||||||
|
// write race-free tests).
|
||||||
|
func (f *FakeClock) HasWaiters() bool {
|
||||||
|
f.lock.RLock()
|
||||||
|
defer f.lock.RUnlock()
|
||||||
|
return len(f.waiters) > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *FakeClock) Sleep(d time.Duration) {
|
||||||
|
f.Step(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration
|
||||||
|
type IntervalClock struct {
|
||||||
|
Time time.Time
|
||||||
|
Duration time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now returns i's time.
|
||||||
|
func (i *IntervalClock) Now() time.Time {
|
||||||
|
i.Time = i.Time.Add(i.Duration)
|
||||||
|
return i.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// Since returns time since the time in i.
|
||||||
|
func (i *IntervalClock) Since(ts time.Time) time.Duration {
|
||||||
|
return i.Time.Sub(ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unimplemented, will panic.
|
||||||
|
// TODO: make interval clock use FakeClock so this can be implemented.
|
||||||
|
func (*IntervalClock) After(d time.Duration) <-chan time.Time {
|
||||||
|
panic("IntervalClock doesn't implement After")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unimplemented, will panic.
|
||||||
|
// TODO: make interval clock use FakeClock so this can be implemented.
|
||||||
|
func (*IntervalClock) NewTimer(d time.Duration) Timer {
|
||||||
|
panic("IntervalClock doesn't implement NewTimer")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unimplemented, will panic.
|
||||||
|
// TODO: make interval clock use FakeClock so this can be implemented.
|
||||||
|
func (*IntervalClock) Tick(d time.Duration) <-chan time.Time {
|
||||||
|
panic("IntervalClock doesn't implement Tick")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*IntervalClock) Sleep(d time.Duration) {
|
||||||
|
panic("IntervalClock doesn't implement Sleep")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Timer allows for injecting fake or real timers into code that
// needs to do arbitrary things based on time.
type Timer interface {
	// C returns the channel on which the timer's firing time is delivered.
	C() <-chan time.Time
	// Stop cancels the timer; it reports whether the timer was still pending.
	Stop() bool
	// Reset re-arms the timer for d; it reports whether the timer was still
	// pending.
	Reset(d time.Duration) bool
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ = Timer(&realTimer{})
|
||||||
|
_ = Timer(&fakeTimer{})
|
||||||
|
)
|
||||||
|
|
||||||
|
// realTimer is backed by an actual time.Timer.
|
||||||
|
type realTimer struct {
|
||||||
|
timer *time.Timer
|
||||||
|
}
|
||||||
|
|
||||||
|
// C returns the underlying timer's channel.
|
||||||
|
func (r *realTimer) C() <-chan time.Time {
|
||||||
|
return r.timer.C
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop calls Stop() on the underlying timer.
|
||||||
|
func (r *realTimer) Stop() bool {
|
||||||
|
return r.timer.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset calls Reset() on the underlying timer.
|
||||||
|
func (r *realTimer) Reset(d time.Duration) bool {
|
||||||
|
return r.timer.Reset(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
// fakeTimer implements Timer based on a FakeClock.
|
||||||
|
type fakeTimer struct {
|
||||||
|
fakeClock *FakeClock
|
||||||
|
waiter fakeClockWaiter
|
||||||
|
}
|
||||||
|
|
||||||
|
// C returns the channel that notifies when this timer has fired.
|
||||||
|
func (f *fakeTimer) C() <-chan time.Time {
|
||||||
|
return f.waiter.destChan
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise.
|
||||||
|
func (f *fakeTimer) Stop() bool {
|
||||||
|
f.fakeClock.lock.Lock()
|
||||||
|
defer f.fakeClock.lock.Unlock()
|
||||||
|
|
||||||
|
newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters))
|
||||||
|
for i := range f.fakeClock.waiters {
|
||||||
|
w := &f.fakeClock.waiters[i]
|
||||||
|
if w != &f.waiter {
|
||||||
|
newWaiters = append(newWaiters, *w)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
f.fakeClock.waiters = newWaiters
|
||||||
|
|
||||||
|
return !f.waiter.fired
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet
|
||||||
|
// fired, or false otherwise.
|
||||||
|
func (f *fakeTimer) Reset(d time.Duration) bool {
|
||||||
|
f.fakeClock.lock.Lock()
|
||||||
|
defer f.fakeClock.lock.Unlock()
|
||||||
|
|
||||||
|
active := !f.waiter.fired
|
||||||
|
|
||||||
|
f.waiter.fired = false
|
||||||
|
f.waiter.targetTime = f.fakeClock.time.Add(d)
|
||||||
|
|
||||||
|
return active
|
||||||
|
}
|
|
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@ -0,0 +1,63 @@
|
||||||
|
# Bazel build rules for the discovery client package. The "automanaged" tags
# mark these targets as kept in sync by the BUILD-file generator.
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "discovery_client.go",
        "helper.go",
        "restmapper.go",
        "unstructured.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library",
        "//vendor/github.com/go-openapi/loads:go_default_library",
        "//vendor/github.com/go-openapi/spec:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

go_test(
    name = "go_default_xtest",
    srcs = [
        "discovery_client_test.go",
        "helper_blackbox_test.go",
        "restmapper_test.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library",
        "//vendor/github.com/go-openapi/spec:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
        "//vendor/k8s.io/client-go/discovery:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/rest/fake:go_default_library",
    ],
)
|
|
@ -0,0 +1,464 @@
|
||||||
|
/*
|
||||||
|
Copyright 2015 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package discovery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/emicklei/go-restful-swagger12"
|
||||||
|
|
||||||
|
"github.com/go-openapi/loads"
|
||||||
|
"github.com/go-openapi/spec"
|
||||||
|
"k8s.io/apimachinery/pkg/api/errors"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||||
|
"k8s.io/apimachinery/pkg/version"
|
||||||
|
"k8s.io/client-go/kubernetes/scheme"
|
||||||
|
"k8s.io/client-go/pkg/api/v1"
|
||||||
|
restclient "k8s.io/client-go/rest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. ThirdPartyResources).
|
||||||
|
const defaultRetries = 2
|
||||||
|
|
||||||
|
// DiscoveryInterface holds the methods that discover server-supported API groups,
|
||||||
|
// versions and resources.
|
||||||
|
type DiscoveryInterface interface {
|
||||||
|
RESTClient() restclient.Interface
|
||||||
|
ServerGroupsInterface
|
||||||
|
ServerResourcesInterface
|
||||||
|
ServerVersionInterface
|
||||||
|
SwaggerSchemaInterface
|
||||||
|
OpenAPISchemaInterface
|
||||||
|
}
|
||||||
|
|
||||||
|
// CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness.
|
||||||
|
type CachedDiscoveryInterface interface {
|
||||||
|
DiscoveryInterface
|
||||||
|
// Fresh returns true if no cached data was used that had been retrieved before the instantiation.
|
||||||
|
Fresh() bool
|
||||||
|
// Invalidate enforces that no cached data is used in the future that is older than the current time.
|
||||||
|
Invalidate()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerGroupsInterface has methods for obtaining supported groups on the API server
|
||||||
|
type ServerGroupsInterface interface {
|
||||||
|
// ServerGroups returns the supported groups, with information like supported versions and the
|
||||||
|
// preferred version.
|
||||||
|
ServerGroups() (*metav1.APIGroupList, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerResourcesInterface has methods for obtaining supported resources on the API server
|
||||||
|
type ServerResourcesInterface interface {
|
||||||
|
// ServerResourcesForGroupVersion returns the supported resources for a group and version.
|
||||||
|
ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error)
|
||||||
|
// ServerResources returns the supported resources for all groups and versions.
|
||||||
|
ServerResources() ([]*metav1.APIResourceList, error)
|
||||||
|
// ServerPreferredResources returns the supported resources with the version preferred by the
|
||||||
|
// server.
|
||||||
|
ServerPreferredResources() ([]*metav1.APIResourceList, error)
|
||||||
|
// ServerPreferredNamespacedResources returns the supported namespaced resources with the
|
||||||
|
// version preferred by the server.
|
||||||
|
ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerVersionInterface has a method for retrieving the server's version.
|
||||||
|
type ServerVersionInterface interface {
|
||||||
|
// ServerVersion retrieves and parses the server's version (git version).
|
||||||
|
ServerVersion() (*version.Info, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SwaggerSchemaInterface has a method to retrieve the swagger schema.
|
||||||
|
type SwaggerSchemaInterface interface {
|
||||||
|
// SwaggerSchema retrieves and parses the swagger API schema the server supports.
|
||||||
|
SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenAPISchemaInterface has a method to retrieve the open API schema.
|
||||||
|
type OpenAPISchemaInterface interface {
|
||||||
|
// OpenAPISchema retrieves and parses the swagger API schema the server supports.
|
||||||
|
OpenAPISchema() (*spec.Swagger, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiscoveryClient implements the functions that discover server-supported API groups,
|
||||||
|
// versions and resources.
|
||||||
|
type DiscoveryClient struct {
|
||||||
|
restClient restclient.Interface
|
||||||
|
|
||||||
|
LegacyPrefix string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert metav1.APIVersions to metav1.APIGroup. APIVersions is used by legacy v1, so
// group would be "".
//
// NOTE(review): the groupVersions[0] index below panics when apiVersions.Versions
// is empty; the only visible caller (ServerGroups) guards with len(v.Versions) != 0
// before calling — keep that invariant if adding callers.
func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.APIGroup) {
	groupVersions := []metav1.GroupVersionForDiscovery{}
	for _, version := range apiVersions.Versions {
		// For the legacy group, GroupVersion and Version are the same string.
		groupVersion := metav1.GroupVersionForDiscovery{
			GroupVersion: version,
			Version:      version,
		}
		groupVersions = append(groupVersions, groupVersion)
	}
	apiGroup.Versions = groupVersions
	// There should be only one groupVersion returned at /api
	apiGroup.PreferredVersion = groupVersions[0]
	return
}
|
||||||
|
|
||||||
|
// ServerGroups returns the supported groups, with information like supported versions and the
// preferred version. It merges the legacy group discovered at the LegacyPrefix
// ("/api") with the groupified APIs discovered at "/apis". 403/404 responses
// from either endpoint are tolerated for compatibility with old (v1.0) servers.
func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) {
	// Get the groupVersions exposed at /api
	v := &metav1.APIVersions{}
	err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v)
	apiGroup := metav1.APIGroup{}
	// Only synthesize the legacy group when /api succeeded and returned versions;
	// apiVersionsToAPIGroup panics on an empty version list.
	if err == nil && len(v.Versions) != 0 {
		apiGroup = apiVersionsToAPIGroup(v)
	}
	// Any error other than 403/404 from /api is fatal.
	if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
		return nil, err
	}

	// Get the groupVersions exposed at /apis
	apiGroupList = &metav1.APIGroupList{}
	err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList)
	if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
		return nil, err
	}
	// to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api
	if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
		apiGroupList = &metav1.APIGroupList{}
	}

	// append the group retrieved from /api to the list if not empty
	if len(v.Versions) != 0 {
		apiGroupList.Groups = append(apiGroupList.Groups, apiGroup)
	}
	return apiGroupList, nil
}
|
||||||
|
|
||||||
|
// ServerResourcesForGroupVersion returns the supported resources for a group and version.
|
||||||
|
func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *metav1.APIResourceList, err error) {
|
||||||
|
url := url.URL{}
|
||||||
|
if len(groupVersion) == 0 {
|
||||||
|
return nil, fmt.Errorf("groupVersion shouldn't be empty")
|
||||||
|
}
|
||||||
|
if len(d.LegacyPrefix) > 0 && groupVersion == "v1" {
|
||||||
|
url.Path = d.LegacyPrefix + "/" + groupVersion
|
||||||
|
} else {
|
||||||
|
url.Path = "/apis/" + groupVersion
|
||||||
|
}
|
||||||
|
resources = &metav1.APIResourceList{
|
||||||
|
GroupVersion: groupVersion,
|
||||||
|
}
|
||||||
|
err = d.restClient.Get().AbsPath(url.String()).Do().Into(resources)
|
||||||
|
if err != nil {
|
||||||
|
// ignore 403 or 404 error to be compatible with an v1.0 server.
|
||||||
|
if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
|
||||||
|
return resources, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resources, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// serverResources returns the supported resources for all groups and versions.
// On partial failure it returns the resources that were discovered together with
// an *ErrGroupDiscoveryFailed describing the groups that failed; with failEarly
// set, it aborts on the first failing group instead.
func (d *DiscoveryClient) serverResources(failEarly bool) ([]*metav1.APIResourceList, error) {
	apiGroups, err := d.ServerGroups()
	if err != nil {
		return nil, err
	}

	result := []*metav1.APIResourceList{}
	failedGroups := make(map[schema.GroupVersion]error)

	for _, apiGroup := range apiGroups.Groups {
		for _, version := range apiGroup.Versions {
			gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version}
			resources, err := d.ServerResourcesForGroupVersion(version.GroupVersion)
			if err != nil {
				// TODO: maybe restrict this to NotFound errors
				failedGroups[gv] = err
				if failEarly {
					return nil, &ErrGroupDiscoveryFailed{Groups: failedGroups}
				}
				// Record the failure but keep collecting the remaining groups.
				continue
			}

			result = append(result, resources)
		}
	}

	if len(failedGroups) == 0 {
		return result, nil
	}

	// Partial success: callers get both the data and the aggregated error.
	return result, &ErrGroupDiscoveryFailed{Groups: failedGroups}
}
|
||||||
|
|
||||||
|
// ServerResources returns the supported resources for all groups and versions.
// Discovery is retried (up to defaultRetries) because the set of groups can
// change between the ServerGroups call and the per-group resource fetches.
func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) {
	return withRetries(defaultRetries, d.serverResources)
}
|
||||||
|
|
||||||
|
// ErrGroupDiscoveryFailed is returned if one or more API groups fail to load.
type ErrGroupDiscoveryFailed struct {
	// Groups maps each group/version that failed to load to the error cause.
	Groups map[schema.GroupVersion]error
}
|
||||||
|
|
||||||
|
// Error implements the error interface
|
||||||
|
func (e *ErrGroupDiscoveryFailed) Error() string {
|
||||||
|
var groups []string
|
||||||
|
for k, v := range e.Groups {
|
||||||
|
groups = append(groups, fmt.Sprintf("%s: %v", k, v))
|
||||||
|
}
|
||||||
|
sort.Strings(groups)
|
||||||
|
return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", "))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover
|
||||||
|
// a complete list of APIs for the client to use.
|
||||||
|
func IsGroupDiscoveryFailedError(err error) bool {
|
||||||
|
_, ok := err.(*ErrGroupDiscoveryFailed)
|
||||||
|
return err != nil && ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// serverPreferredResources returns the supported resources with the version preferred by the server.
// For each GroupResource exactly one version is selected: the group's preferred
// version if it serves the resource, otherwise the first discovered version.
// The returned lists preserve discovery order; subresources (names containing
// "/") are skipped. On partial failure the collected results are returned
// together with an *ErrGroupDiscoveryFailed.
func (d *DiscoveryClient) serverPreferredResources(failEarly bool) ([]*metav1.APIResourceList, error) {
	serverGroupList, err := d.ServerGroups()
	if err != nil {
		return nil, err
	}

	result := []*metav1.APIResourceList{}
	failedGroups := make(map[schema.GroupVersion]error)

	grVersions := map[schema.GroupResource]string{}                         // selected version of a GroupResource
	grApiResources := map[schema.GroupResource]*metav1.APIResource{}        // selected APIResource for a GroupResource
	gvApiResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping

	for _, apiGroup := range serverGroupList.Groups {
		for _, version := range apiGroup.Versions {
			groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version}
			apiResourceList, err := d.ServerResourcesForGroupVersion(version.GroupVersion)
			if err != nil {
				// TODO: maybe restrict this to NotFound errors
				failedGroups[groupVersion] = err
				if failEarly {
					return nil, &ErrGroupDiscoveryFailed{Groups: failedGroups}
				}
				continue
			}

			// create empty list which is filled later in another loop
			emptyApiResourceList := metav1.APIResourceList{
				GroupVersion: version.GroupVersion,
			}
			gvApiResourceLists[groupVersion] = &emptyApiResourceList
			result = append(result, &emptyApiResourceList)

			for i := range apiResourceList.APIResources {
				apiResource := &apiResourceList.APIResources[i]
				// Subresources (e.g. "pods/status") are not selected here.
				if strings.Contains(apiResource.Name, "/") {
					continue
				}
				gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name}
				// Once a GroupResource has been selected, only the group's
				// preferred version may override the earlier choice.
				if _, ok := grApiResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version {
					// only override with preferred version
					continue
				}
				grVersions[gv] = version.Version
				grApiResources[gv] = apiResource
			}
		}
	}

	// group selected APIResources according to GroupVersion into APIResourceLists
	for groupResource, apiResource := range grApiResources {
		version := grVersions[groupResource]
		groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version}
		// The blueprint list was added to result above; appending here fills it in place.
		apiResourceList := gvApiResourceLists[groupVersion]
		apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource)
	}

	if len(failedGroups) == 0 {
		return result, nil
	}

	return result, &ErrGroupDiscoveryFailed{Groups: failedGroups}
}
|
||||||
|
|
||||||
|
// ServerPreferredResources returns the supported resources with the version preferred by the
|
||||||
|
// server.
|
||||||
|
func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
|
||||||
|
return withRetries(defaultRetries, func(retryEarly bool) ([]*metav1.APIResourceList, error) {
|
||||||
|
return d.serverPreferredResources(retryEarly)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerPreferredNamespacedResources returns the supported namespaced resources with the
// version preferred by the server.
func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
	all, err := d.ServerPreferredResources()
	// Filter even when err is non-nil: ServerPreferredResources can return
	// partial results alongside an ErrGroupDiscoveryFailed, and the error is
	// passed through to the caller.
	return FilteredBy(ResourcePredicateFunc(func(groupVersion string, r *metav1.APIResource) bool {
		return r.Namespaced
	}), all), err
}
|
||||||
|
|
||||||
|
// ServerVersion retrieves and parses the server's version (git version)
// from the /version endpoint.
func (d *DiscoveryClient) ServerVersion() (*version.Info, error) {
	body, err := d.restClient.Get().AbsPath("/version").Do().Raw()
	if err != nil {
		return nil, err
	}
	var info version.Info
	err = json.Unmarshal(body, &info)
	if err != nil {
		// Include the raw body so malformed (non-JSON) responses are debuggable.
		return nil, fmt.Errorf("got '%s': %v", string(body), err)
	}
	return &info, nil
}
|
||||||
|
|
||||||
|
// SwaggerSchema retrieves and parses the swagger API schema the server supports
// for the given group/version, after verifying via discovery that the server
// actually serves that version.
// TODO: Replace usages with Open API. Tracked in https://github.com/kubernetes/kubernetes/issues/44589
func (d *DiscoveryClient) SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error) {
	if version.Empty() {
		return nil, fmt.Errorf("groupVersion cannot be empty")
	}

	groupList, err := d.ServerGroups()
	if err != nil {
		return nil, err
	}
	groupVersions := metav1.ExtractGroupVersions(groupList)
	// This check also takes care the case that kubectl is newer than the running endpoint
	if stringDoesntExistIn(version.String(), groupVersions) {
		return nil, fmt.Errorf("API version: %v is not supported by the server. Use one of: %v", version, groupVersions)
	}
	var path string
	if len(d.LegacyPrefix) > 0 && version == v1.SchemeGroupVersion {
		// Legacy core group swagger lives under /swaggerapi + LegacyPrefix.
		path = "/swaggerapi" + d.LegacyPrefix + "/" + version.Version
	} else {
		path = "/swaggerapi/apis/" + version.Group + "/" + version.Version
	}

	body, err := d.restClient.Get().AbsPath(path).Do().Raw()
	if err != nil {
		return nil, err
	}
	// NOTE(review): this local shadows the imported "schema" package for the
	// rest of the function; the package is not needed after this point.
	var schema swagger.ApiDeclaration
	err = json.Unmarshal(body, &schema)
	if err != nil {
		return nil, fmt.Errorf("got '%s': %v", string(body), err)
	}
	return &schema, nil
}
|
||||||
|
|
||||||
|
// OpenAPISchema fetches the open api schema using a rest client and parses the json.
|
||||||
|
// Warning: this is very expensive (~1.2s)
|
||||||
|
func (d *DiscoveryClient) OpenAPISchema() (*spec.Swagger, error) {
|
||||||
|
data, err := d.restClient.Get().AbsPath("/swagger.json").Do().Raw()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
msg := json.RawMessage(data)
|
||||||
|
doc, err := loads.Analyzed(msg, "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return doc.Spec(), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns.
|
||||||
|
func withRetries(maxRetries int, f func(failEarly bool) ([]*metav1.APIResourceList, error)) ([]*metav1.APIResourceList, error) {
|
||||||
|
var result []*metav1.APIResourceList
|
||||||
|
var err error
|
||||||
|
for i := 0; i < maxRetries; i++ {
|
||||||
|
failEarly := i < maxRetries-1
|
||||||
|
result, err = f(failEarly)
|
||||||
|
if err == nil {
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
if _, ok := err.(*ErrGroupDiscoveryFailed); !ok {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// setDiscoveryDefaults rewrites config for discovery use: no API path or group
// version (discovery spans all groups) and a decode-only serializer, plus a
// default user agent when none is set.
func setDiscoveryDefaults(config *restclient.Config) error {
	config.APIPath = ""
	config.GroupVersion = nil
	// Discovery only reads from the server, so encoding is a no-op.
	codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()}
	config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
	if len(config.UserAgent) == 0 {
		config.UserAgent = restclient.DefaultKubernetesUserAgent()
	}
	// Always nil today; the error return is kept for future defaulting steps.
	return nil
}
|
||||||
|
|
||||||
|
// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client
// can be used to discover supported resources in the API server.
func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) {
	// Copy the config so discovery-specific defaulting does not mutate the caller's copy.
	config := *c
	if err := setDiscoveryDefaults(&config); err != nil {
		return nil, err
	}
	// Note: the client (possibly nil) is returned together with err; callers
	// must check err before use.
	client, err := restclient.UnversionedRESTClientFor(&config)
	return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err
}
|
||||||
|
|
||||||
|
// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. If
|
||||||
|
// there is an error, it panics.
|
||||||
|
func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient {
|
||||||
|
client, err := NewDiscoveryClientForConfig(c)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return client
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient,
// using the standard "/api" legacy prefix.
func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient {
	return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"}
}
|
||||||
|
|
||||||
|
// stringDoesntExistIn reports whether str is absent from slice.
func stringDoesntExistIn(str string, slice []string) bool {
	found := false
	for _, candidate := range slice {
		if candidate == str {
			found = true
			break
		}
	}
	return !found
}
|
||||||
|
|
||||||
|
// RESTClient returns a RESTClient that is used to communicate
|
||||||
|
// with API server by this client implementation.
|
||||||
|
func (c *DiscoveryClient) RESTClient() restclient.Interface {
|
||||||
|
if c == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return c.restClient
|
||||||
|
}
|
|
@ -0,0 +1,121 @@
|
||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package discovery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
|
apimachineryversion "k8s.io/apimachinery/pkg/version"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MatchesServerVersion queries the server to compares the build version
|
||||||
|
// (git hash) of the client with the server's build version. It returns an error
|
||||||
|
// if it failed to contact the server or if the versions are not an exact match.
|
||||||
|
func MatchesServerVersion(clientVersion apimachineryversion.Info, client DiscoveryInterface) error {
|
||||||
|
sVer, err := client.ServerVersion()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("couldn't read version from server: %v\n", err)
|
||||||
|
}
|
||||||
|
// GitVersion includes GitCommit and GitTreeState, but best to be safe?
|
||||||
|
if clientVersion.GitVersion != sVer.GitVersion || clientVersion.GitCommit != sVer.GitCommit || clientVersion.GitTreeState != sVer.GitTreeState {
|
||||||
|
return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, clientVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerSupportsVersion returns an error if the server doesn't have the required version.
func ServerSupportsVersion(client DiscoveryInterface, requiredGV schema.GroupVersion) error {
	groups, err := client.ServerGroups()
	if err != nil {
		// This is almost always a connection error, and higher level code should treat this as a generic error,
		// not a negotiation specific error.
		return err
	}
	versions := metav1.ExtractGroupVersions(groups)
	serverVersions := sets.String{}
	for _, v := range versions {
		serverVersions.Insert(v)
	}

	if serverVersions.Has(requiredGV.String()) {
		return nil
	}

	// If the server supports no versions, then we should pretend it has the version because of old servers.
	// This can happen because discovery fails due to 403 Forbidden errors
	if len(serverVersions) == 0 {
		return nil
	}

	return fmt.Errorf("server does not support API version %q", requiredGV)
}
|
||||||
|
|
||||||
|
// GroupVersionResources converts APIResourceLists to the GroupVersionResources.
|
||||||
|
func GroupVersionResources(rls []*metav1.APIResourceList) (map[schema.GroupVersionResource]struct{}, error) {
|
||||||
|
gvrs := map[schema.GroupVersionResource]struct{}{}
|
||||||
|
for _, rl := range rls {
|
||||||
|
gv, err := schema.ParseGroupVersion(rl.GroupVersion)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for i := range rl.APIResources {
|
||||||
|
gvrs[schema.GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: rl.APIResources[i].Name}] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return gvrs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilteredBy filters by the given predicate. Empty APIResourceLists are dropped.
func FilteredBy(pred ResourcePredicate, rls []*metav1.APIResourceList) []*metav1.APIResourceList {
	result := []*metav1.APIResourceList{}
	for _, rl := range rls {
		// Shallow-copy the list header, then rebuild APIResources from matches only;
		// the input lists are never mutated.
		filtered := *rl
		filtered.APIResources = nil
		for i := range rl.APIResources {
			if pred.Match(rl.GroupVersion, &rl.APIResources[i]) {
				filtered.APIResources = append(filtered.APIResources, rl.APIResources[i])
			}
		}
		// Lists with no surviving resources are omitted from the result.
		if filtered.APIResources != nil {
			result = append(result, &filtered)
		}
	}
	return result
}
|
||||||
|
|
||||||
|
// ResourcePredicate decides whether an APIResource (identified by its
// groupVersion string) matches; used by FilteredBy.
type ResourcePredicate interface {
	Match(groupVersion string, r *metav1.APIResource) bool
}
|
||||||
|
|
||||||
|
// ResourcePredicateFunc adapts a plain function to the ResourcePredicate interface.
type ResourcePredicateFunc func(groupVersion string, r *metav1.APIResource) bool

// Match invokes the wrapped function.
func (fn ResourcePredicateFunc) Match(groupVersion string, r *metav1.APIResource) bool {
	return fn(groupVersion, r)
}
|
||||||
|
|
||||||
|
// SupportsAllVerbs is a predicate matching a resource iff all given verbs are supported.
type SupportsAllVerbs struct {
	// Verbs that must all be present in the resource's verb list.
	Verbs []string
}

// Match reports whether the resource's advertised verbs are a superset of p.Verbs.
func (p SupportsAllVerbs) Match(groupVersion string, r *metav1.APIResource) bool {
	return sets.NewString([]string(r.Verbs)...).HasAll(p.Verbs...)
}
|
|
@ -0,0 +1,304 @@
|
||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package discovery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/api/meta"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
|
||||||
|
"github.com/golang/glog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIGroupResources is an API group with a mapping of versions to
// resources.
type APIGroupResources struct {
	// Group is the discovery metadata for the API group itself.
	Group metav1.APIGroup
	// A mapping of version string to a slice of APIResources for
	// that version.
	VersionedResources map[string][]metav1.APIResource
}
|
||||||
|
|
||||||
|
// NewRESTMapper returns a PriorityRESTMapper based on the discovered
// groups and resources passed in.
//
// Priority ordering: "" /v1 first, then each group's preferred version (when
// discovery returned resources for it), then an any-version entry per group in
// discovery order.
func NewRESTMapper(groupResources []*APIGroupResources, versionInterfaces meta.VersionInterfacesFunc) meta.RESTMapper {
	unionMapper := meta.MultiRESTMapper{}

	var groupPriority []string
	// /v1 is special. It should always come first
	resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}}
	kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}}

	for _, group := range groupResources {
		groupPriority = append(groupPriority, group.Group.Name)

		// Prioritize the group's preferred version, but only if discovery
		// actually returned resources for that version.
		if len(group.Group.PreferredVersion.Version) != 0 {
			preferred := group.Group.PreferredVersion.Version
			if _, ok := group.VersionedResources[preferred]; ok {
				resourcePriority = append(resourcePriority, schema.GroupVersionResource{
					Group:    group.Group.Name,
					Version:  group.Group.PreferredVersion.Version,
					Resource: meta.AnyResource,
				})

				kindPriority = append(kindPriority, schema.GroupVersionKind{
					Group:   group.Group.Name,
					Version: group.Group.PreferredVersion.Version,
					Kind:    meta.AnyKind,
				})
			}
		}

		// Build one DefaultRESTMapper per discovered group/version that has resources.
		for _, discoveryVersion := range group.Group.Versions {
			resources, ok := group.VersionedResources[discoveryVersion.Version]
			if !ok {
				continue
			}

			gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version}
			versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}, versionInterfaces)

			for _, resource := range resources {
				scope := meta.RESTScopeNamespace
				if !resource.Namespaced {
					scope = meta.RESTScopeRoot
				}
				versionMapper.Add(gv.WithKind(resource.Kind), scope)
				// TODO only do this if it supports listing
				versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope)
			}
			// TODO why is this type not in discovery (at least for "v1")
			versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot)
			unionMapper = append(unionMapper, versionMapper)
		}
	}

	// After the preferred versions, fall back to any-version wildcards per
	// group, in discovery order.
	for _, group := range groupPriority {
		resourcePriority = append(resourcePriority, schema.GroupVersionResource{
			Group:    group,
			Version:  meta.AnyVersion,
			Resource: meta.AnyResource,
		})
		kindPriority = append(kindPriority, schema.GroupVersionKind{
			Group:   group,
			Version: meta.AnyVersion,
			Kind:    meta.AnyKind,
		})
	}

	return meta.PriorityRESTMapper{
		Delegate:         unionMapper,
		ResourcePriority: resourcePriority,
		KindPriority:     kindPriority,
	}
}
|
||||||
|
|
||||||
|
// GetAPIGroupResources uses the provided discovery client to gather
// discovery information and populate a slice of APIGroupResources.
// Per-version resource fetch failures are silently skipped (best effort);
// only a ServerGroups failure aborts.
func GetAPIGroupResources(cl DiscoveryInterface) ([]*APIGroupResources, error) {
	apiGroups, err := cl.ServerGroups()
	if err != nil {
		return nil, err
	}
	var result []*APIGroupResources
	for _, group := range apiGroups.Groups {
		groupResources := &APIGroupResources{
			Group:              group,
			VersionedResources: make(map[string][]metav1.APIResource),
		}
		for _, version := range group.Versions {
			resources, err := cl.ServerResourcesForGroupVersion(version.GroupVersion)
			if err != nil {
				// continue as best we can
				// TODO track the errors and update callers to handle partial errors.
				continue
			}
			groupResources.VersionedResources[version.Version] = resources.APIResources
		}
		result = append(result, groupResources)
	}
	return result, nil
}
|
||||||
|
|
||||||
|
// DeferredDiscoveryRESTMapper is a RESTMapper that will defer
// initialization of the RESTMapper until the first mapping is
// requested.
type DeferredDiscoveryRESTMapper struct {
	// initMu guards delegate construction (getDelegate) and teardown (Reset).
	initMu sync.Mutex
	// delegate is the lazily built mapper; nil until first use and after Reset.
	delegate meta.RESTMapper
	// cl supplies (cached) discovery data used to build the delegate.
	cl CachedDiscoveryInterface
	// versionInterface is forwarded to NewRESTMapper when building the delegate.
	versionInterface meta.VersionInterfacesFunc
}
|
||||||
|
|
||||||
|
// NewDeferredDiscoveryRESTMapper returns a
// DeferredDiscoveryRESTMapper that will lazily query the provided
// client for discovery information to do REST mappings.
func NewDeferredDiscoveryRESTMapper(cl CachedDiscoveryInterface, versionInterface meta.VersionInterfacesFunc) *DeferredDiscoveryRESTMapper {
	// delegate stays nil; it is built on first use by getDelegate().
	return &DeferredDiscoveryRESTMapper{
		cl:               cl,
		versionInterface: versionInterface,
	}
}
|
||||||
|
|
||||||
|
// getDelegate lazily builds, caches, and returns the underlying RESTMapper
// from discovery data. Safe for concurrent use (guarded by initMu).
func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) {
	d.initMu.Lock()
	defer d.initMu.Unlock()

	// Fast path: already built and not Reset since.
	if d.delegate != nil {
		return d.delegate, nil
	}

	groupResources, err := GetAPIGroupResources(d.cl)
	if err != nil {
		return nil, err
	}

	d.delegate = NewRESTMapper(groupResources, d.versionInterface)
	// err is nil here; returned for symmetry with the error path.
	return d.delegate, err
}
|
||||||
|
|
||||||
|
// Reset resets the internally cached Discovery information and will
// cause the next mapping request to re-discover.
func (d *DeferredDiscoveryRESTMapper) Reset() {
	glog.V(5).Info("Invalidating discovery information")

	// Hold initMu so a concurrent getDelegate() build cannot race the teardown.
	d.initMu.Lock()
	defer d.initMu.Unlock()

	d.cl.Invalidate()
	// Dropping the delegate forces getDelegate() to rebuild on next use.
	d.delegate = nil
}
|
||||||
|
|
||||||
|
// KindFor takes a partial resource and returns back the single match.
// It returns an error if there are multiple matches.
func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) {
	del, err := d.getDelegate()
	if err != nil {
		return schema.GroupVersionKind{}, err
	}
	gvk, err = del.KindFor(resource)
	if err != nil && !d.cl.Fresh() {
		// A miss against stale cached discovery data may resolve after a
		// refresh; assumes cl.Fresh() reports true after Invalidate so the
		// recursive retry terminates — TODO confirm against the
		// CachedDiscoveryInterface contract.
		d.Reset()
		gvk, err = d.KindFor(resource)
	}
	return
}
|
||||||
|
|
||||||
|
// KindsFor takes a partial resource and returns back the list of
// potential kinds in priority order.
func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) {
	del, err := d.getDelegate()
	if err != nil {
		return nil, err
	}
	gvks, err = del.KindsFor(resource)
	if len(gvks) == 0 && !d.cl.Fresh() {
		// Empty result with stale cache: refresh and retry once (recursively;
		// assumes cl.Fresh() is true after Reset — TODO confirm).
		d.Reset()
		gvks, err = d.KindsFor(resource)
	}
	return
}
|
||||||
|
|
||||||
|
// ResourceFor takes a partial resource and returns back the single
// match. It returns an error if there are multiple matches.
func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) {
	del, err := d.getDelegate()
	if err != nil {
		return schema.GroupVersionResource{}, err
	}
	gvr, err = del.ResourceFor(input)
	if err != nil && !d.cl.Fresh() {
		// Failure with stale cache: refresh and retry once (recursively;
		// assumes cl.Fresh() is true after Reset — TODO confirm).
		d.Reset()
		gvr, err = d.ResourceFor(input)
	}
	return
}
|
||||||
|
|
||||||
|
// ResourcesFor takes a partial resource and returns back the list of
// potential resource in priority order.
func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) {
	del, err := d.getDelegate()
	if err != nil {
		return nil, err
	}
	gvrs, err = del.ResourcesFor(input)
	if len(gvrs) == 0 && !d.cl.Fresh() {
		// Empty result with stale cache: refresh and retry once (recursively;
		// assumes cl.Fresh() is true after Reset — TODO confirm).
		d.Reset()
		gvrs, err = d.ResourcesFor(input)
	}
	return
}
|
||||||
|
|
||||||
|
// RESTMapping identifies a preferred resource mapping for the
// provided group kind.
func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) {
	del, err := d.getDelegate()
	if err != nil {
		return nil, err
	}
	m, err = del.RESTMapping(gk, versions...)
	if err != nil && !d.cl.Fresh() {
		// Failure with stale cache: refresh and retry once (recursively;
		// assumes cl.Fresh() is true after Reset — TODO confirm).
		d.Reset()
		m, err = d.RESTMapping(gk, versions...)
	}
	return
}
|
||||||
|
|
||||||
|
// RESTMappings returns the RESTMappings for the provided group kind
// in a rough internal preferred order. If no kind is found, it will
// return a NoResourceMatchError.
func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) {
	del, err := d.getDelegate()
	if err != nil {
		return nil, err
	}
	ms, err = del.RESTMappings(gk, versions...)
	if len(ms) == 0 && !d.cl.Fresh() {
		// Empty result with stale cache: refresh and retry once (recursively;
		// assumes cl.Fresh() is true after Reset — TODO confirm).
		d.Reset()
		ms, err = d.RESTMappings(gk, versions...)
	}
	return
}
|
||||||
|
|
||||||
|
// ResourceSingularizer converts a resource name from plural to
|
||||||
|
// singular (e.g., from pods to pod).
|
||||||
|
func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
|
||||||
|
del, err := d.getDelegate()
|
||||||
|
if err != nil {
|
||||||
|
return resource, err
|
||||||
|
}
|
||||||
|
singular, err = del.ResourceSingularizer(resource)
|
||||||
|
if err != nil && !d.cl.Fresh() {
|
||||||
|
d.Reset()
|
||||||
|
singular, err = d.ResourceSingularizer(resource)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DeferredDiscoveryRESTMapper) String() string {
|
||||||
|
del, err := d.getDelegate()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure it satisfies the interface
|
||||||
|
var _ meta.RESTMapper = &DeferredDiscoveryRESTMapper{}
|
|
@ -0,0 +1,95 @@
|
||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package discovery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UnstructuredObjectTyper provides a runtime.ObjectTyper implmentation for
|
||||||
|
// runtime.Unstructured object based on discovery information.
|
||||||
|
type UnstructuredObjectTyper struct {
|
||||||
|
registered map[schema.GroupVersionKind]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewUnstructuredObjectTyper returns a runtime.ObjectTyper for
|
||||||
|
// unstructred objects based on discovery information.
|
||||||
|
func NewUnstructuredObjectTyper(groupResources []*APIGroupResources) *UnstructuredObjectTyper {
|
||||||
|
dot := &UnstructuredObjectTyper{registered: make(map[schema.GroupVersionKind]bool)}
|
||||||
|
for _, group := range groupResources {
|
||||||
|
for _, discoveryVersion := range group.Group.Versions {
|
||||||
|
resources, ok := group.VersionedResources[discoveryVersion.Version]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version}
|
||||||
|
for _, resource := range resources {
|
||||||
|
dot.registered[gv.WithKind(resource.Kind)] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dot
|
||||||
|
}
|
||||||
|
|
||||||
|
// ObjectKind returns the group,version,kind of the provided object, or an error
|
||||||
|
// if the object in not runtime.Unstructured or has no group,version,kind
|
||||||
|
// information.
|
||||||
|
func (d *UnstructuredObjectTyper) ObjectKind(obj runtime.Object) (schema.GroupVersionKind, error) {
|
||||||
|
if _, ok := obj.(runtime.Unstructured); !ok {
|
||||||
|
return schema.GroupVersionKind{}, fmt.Errorf("type %T is invalid for dynamic object typer", obj)
|
||||||
|
}
|
||||||
|
|
||||||
|
return obj.GetObjectKind().GroupVersionKind(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ObjectKinds returns a slice of one element with the group,version,kind of the
|
||||||
|
// provided object, or an error if the object is not runtime.Unstructured or
|
||||||
|
// has no group,version,kind information. unversionedType will always be false
|
||||||
|
// because runtime.Unstructured object should always have group,version,kind
|
||||||
|
// information set.
|
||||||
|
func (d *UnstructuredObjectTyper) ObjectKinds(obj runtime.Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) {
|
||||||
|
gvk, err := d.ObjectKind(obj)
|
||||||
|
if err != nil {
|
||||||
|
return nil, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return []schema.GroupVersionKind{gvk}, false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recognizes returns true if the provided group,version,kind was in the
|
||||||
|
// discovery information.
|
||||||
|
func (d *UnstructuredObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool {
|
||||||
|
return d.registered[gvk]
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsUnversioned returns false always because runtime.Unstructured objects
|
||||||
|
// should always have group,version,kind information set. ok will be true if the
|
||||||
|
// object's group,version,kind is api.Registry.
|
||||||
|
func (d *UnstructuredObjectTyper) IsUnversioned(obj runtime.Object) (unversioned bool, ok bool) {
|
||||||
|
gvk, err := d.ObjectKind(obj)
|
||||||
|
if err != nil {
|
||||||
|
return false, false
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, d.registered[gvk]
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ runtime.ObjectTyper = &UnstructuredObjectTyper{}
|
|
@ -0,0 +1,41 @@
|
||||||
|
package(default_visibility = ["//visibility:public"])
|
||||||
|
|
||||||
|
licenses(["notice"])
|
||||||
|
|
||||||
|
load(
|
||||||
|
"@io_bazel_rules_go//go:def.bzl",
|
||||||
|
"go_library",
|
||||||
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = [
|
||||||
|
"doc.go",
|
||||||
|
"register.go",
|
||||||
|
],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
deps = [
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/apps/v1beta1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/authentication/v1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/authorization/v1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/autoscaling/v1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/batch/v1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/policy/v1beta1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/storage/v1:go_default_library",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis/storage/v1beta1:go_default_library",
|
||||||
|
],
|
||||||
|
)
|
|
@ -0,0 +1,20 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// This package is generated by client-gen with custom arguments.
|
||||||
|
|
||||||
|
// This package contains the scheme of the automatically generated clientset.
|
||||||
|
package scheme
|
|
@ -0,0 +1,87 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package scheme
|
||||||
|
|
||||||
|
import (
|
||||||
|
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
|
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
|
||||||
|
corev1 "k8s.io/client-go/pkg/api/v1"
|
||||||
|
appsv1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1"
|
||||||
|
authenticationv1 "k8s.io/client-go/pkg/apis/authentication/v1"
|
||||||
|
authenticationv1beta1 "k8s.io/client-go/pkg/apis/authentication/v1beta1"
|
||||||
|
authorizationv1 "k8s.io/client-go/pkg/apis/authorization/v1"
|
||||||
|
authorizationv1beta1 "k8s.io/client-go/pkg/apis/authorization/v1beta1"
|
||||||
|
autoscalingv1 "k8s.io/client-go/pkg/apis/autoscaling/v1"
|
||||||
|
autoscalingv2alpha1 "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1"
|
||||||
|
batchv1 "k8s.io/client-go/pkg/apis/batch/v1"
|
||||||
|
batchv2alpha1 "k8s.io/client-go/pkg/apis/batch/v2alpha1"
|
||||||
|
certificatesv1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1"
|
||||||
|
extensionsv1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
||||||
|
policyv1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1"
|
||||||
|
rbacv1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1"
|
||||||
|
rbacv1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1"
|
||||||
|
settingsv1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1"
|
||||||
|
storagev1 "k8s.io/client-go/pkg/apis/storage/v1"
|
||||||
|
storagev1beta1 "k8s.io/client-go/pkg/apis/storage/v1beta1"
|
||||||
|
)
|
||||||
|
|
||||||
|
var Scheme = runtime.NewScheme()
|
||||||
|
var Codecs = serializer.NewCodecFactory(Scheme)
|
||||||
|
var ParameterCodec = runtime.NewParameterCodec(Scheme)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
|
||||||
|
AddToScheme(Scheme)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
|
||||||
|
// of clientsets, like in:
|
||||||
|
//
|
||||||
|
// import (
|
||||||
|
// "k8s.io/client-go/kubernetes"
|
||||||
|
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
|
||||||
|
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
// kclientset, _ := kubernetes.NewForConfig(c)
|
||||||
|
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
|
||||||
|
//
|
||||||
|
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
|
||||||
|
// correctly.
|
||||||
|
func AddToScheme(scheme *runtime.Scheme) {
|
||||||
|
corev1.AddToScheme(scheme)
|
||||||
|
appsv1beta1.AddToScheme(scheme)
|
||||||
|
authenticationv1.AddToScheme(scheme)
|
||||||
|
authenticationv1beta1.AddToScheme(scheme)
|
||||||
|
authorizationv1.AddToScheme(scheme)
|
||||||
|
authorizationv1beta1.AddToScheme(scheme)
|
||||||
|
autoscalingv1.AddToScheme(scheme)
|
||||||
|
autoscalingv2alpha1.AddToScheme(scheme)
|
||||||
|
batchv1.AddToScheme(scheme)
|
||||||
|
batchv2alpha1.AddToScheme(scheme)
|
||||||
|
certificatesv1beta1.AddToScheme(scheme)
|
||||||
|
extensionsv1beta1.AddToScheme(scheme)
|
||||||
|
policyv1beta1.AddToScheme(scheme)
|
||||||
|
rbacv1beta1.AddToScheme(scheme)
|
||||||
|
rbacv1alpha1.AddToScheme(scheme)
|
||||||
|
settingsv1alpha1.AddToScheme(scheme)
|
||||||
|
storagev1beta1.AddToScheme(scheme)
|
||||||
|
storagev1.AddToScheme(scheme)
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,45 @@
|
||||||
|
package(default_visibility = ["//visibility:public"])
|
||||||
|
|
||||||
|
licenses(["notice"])
|
||||||
|
|
||||||
|
load(
|
||||||
|
"@io_bazel_rules_go//go:def.bzl",
|
||||||
|
"go_library",
|
||||||
|
)
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = [
|
||||||
|
"annotation_key_constants.go",
|
||||||
|
"doc.go",
|
||||||
|
"field_constants.go",
|
||||||
|
"json.go",
|
||||||
|
"objectreference.go",
|
||||||
|
"register.go",
|
||||||
|
"resource.go",
|
||||||
|
"taint.go",
|
||||||
|
"toleration.go",
|
||||||
|
"types.go",
|
||||||
|
"zz_generated.deepcopy.go",
|
||||||
|
],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = [
|
||||||
|
"//vendor/k8s.io/client-go/pkg/api:__subpackages__",
|
||||||
|
"//vendor/k8s.io/client-go/pkg/apis:__subpackages__",
|
||||||
|
"//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake:__subpackages__",
|
||||||
|
],
|
||||||
|
deps = [
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||||
|
],
|
||||||
|
)
|
|
@ -0,0 +1,44 @@
|
||||||
|
approvers:
|
||||||
|
- erictune
|
||||||
|
- lavalamp
|
||||||
|
- smarterclayton
|
||||||
|
- thockin
|
||||||
|
reviewers:
|
||||||
|
- thockin
|
||||||
|
- lavalamp
|
||||||
|
- smarterclayton
|
||||||
|
- wojtek-t
|
||||||
|
- deads2k
|
||||||
|
- yujuhong
|
||||||
|
- brendandburns
|
||||||
|
- derekwaynecarr
|
||||||
|
- caesarxuchao
|
||||||
|
- vishh
|
||||||
|
- mikedanese
|
||||||
|
- liggitt
|
||||||
|
- nikhiljindal
|
||||||
|
- bprashanth
|
||||||
|
- gmarek
|
||||||
|
- erictune
|
||||||
|
- davidopp
|
||||||
|
- pmorie
|
||||||
|
- sttts
|
||||||
|
- kargakis
|
||||||
|
- dchen1107
|
||||||
|
- saad-ali
|
||||||
|
- zmerlynn
|
||||||
|
- luxas
|
||||||
|
- janetkuo
|
||||||
|
- justinsb
|
||||||
|
- pwittrock
|
||||||
|
- roberthbailey
|
||||||
|
- ncdc
|
||||||
|
- timstclair
|
||||||
|
- yifan-gu
|
||||||
|
- eparis
|
||||||
|
- mwielgus
|
||||||
|
- timothysc
|
||||||
|
- soltysh
|
||||||
|
- piosz
|
||||||
|
- jsafrane
|
||||||
|
- jbeda
|
|
@ -0,0 +1,106 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// This file should be consistent with pkg/api/v1/annotation_key_constants.go.
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
const (
|
||||||
|
// MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods
|
||||||
|
MirrorPodAnnotationKey string = "kubernetes.io/config.mirror"
|
||||||
|
|
||||||
|
// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
|
||||||
|
// in the Annotations of a Pod.
|
||||||
|
TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"
|
||||||
|
|
||||||
|
// TaintsAnnotationKey represents the key of taints data (json serialized)
|
||||||
|
// in the Annotations of a Node.
|
||||||
|
TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
|
||||||
|
|
||||||
|
// SeccompPodAnnotationKey represents the key of a seccomp profile applied
|
||||||
|
// to all containers of a pod.
|
||||||
|
SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"
|
||||||
|
|
||||||
|
// SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
|
||||||
|
// to one container of a pod.
|
||||||
|
SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
|
||||||
|
|
||||||
|
// CreatedByAnnotation represents the key used to store the spec(json)
|
||||||
|
// used to create the resource.
|
||||||
|
CreatedByAnnotation = "kubernetes.io/created-by"
|
||||||
|
|
||||||
|
// PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)
|
||||||
|
// in the Annotations of a Node.
|
||||||
|
PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"
|
||||||
|
|
||||||
|
// SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
|
||||||
|
// container of a pod. The annotation value is a comma separated list of sysctl_name=value
|
||||||
|
// key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by
|
||||||
|
// the kubelet. Pods with other sysctls will fail to launch.
|
||||||
|
SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls"
|
||||||
|
|
||||||
|
// UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
|
||||||
|
// container of a pod. The annotation value is a comma separated list of sysctl_name=value
|
||||||
|
// key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly
|
||||||
|
// namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use
|
||||||
|
// is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet
|
||||||
|
// will fail to launch.
|
||||||
|
UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls"
|
||||||
|
|
||||||
|
// ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache
|
||||||
|
// an object (e.g. secret, config map) before fetching it again from apiserver.
|
||||||
|
// This annotation can be attached to node.
|
||||||
|
ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
|
||||||
|
|
||||||
|
// AffinityAnnotationKey represents the key of affinity data (json serialized)
|
||||||
|
// in the Annotations of a Pod.
|
||||||
|
// TODO: remove when alpha support for affinity is removed
|
||||||
|
AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"
|
||||||
|
|
||||||
|
// annotation key prefix used to identify non-convertible json paths.
|
||||||
|
NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"
|
||||||
|
|
||||||
|
kubectlPrefix = "kubectl.kubernetes.io/"
|
||||||
|
|
||||||
|
// LastAppliedConfigAnnotation is the annotation used to store the previous
|
||||||
|
// configuration of a resource for use in a three way diff by UpdateApplyAnnotation.
|
||||||
|
LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration"
|
||||||
|
|
||||||
|
// AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers
|
||||||
|
//
|
||||||
|
// It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to
|
||||||
|
// allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow
|
||||||
|
// access only from the CIDRs currently allocated to MIT & the USPS.
|
||||||
|
//
|
||||||
|
// Not all cloud providers support this annotation, though AWS & GCE do.
|
||||||
|
AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges"
|
||||||
|
|
||||||
|
// AnnotationValueExternalTrafficLocal Value of annotation to specify local endpoints behavior.
|
||||||
|
AnnotationValueExternalTrafficLocal = "OnlyLocal"
|
||||||
|
// AnnotationValueExternalTrafficGlobal Value of annotation to specify global (legacy) behavior.
|
||||||
|
AnnotationValueExternalTrafficGlobal = "Global"
|
||||||
|
|
||||||
|
// TODO: The beta annotations have been deprecated, remove them when we release k8s 1.8.
|
||||||
|
|
||||||
|
// BetaAnnotationHealthCheckNodePort Annotation specifying the healthcheck nodePort for the service.
|
||||||
|
// If not specified, annotation is created by the service api backend with the allocated nodePort.
|
||||||
|
// Will use user-specified nodePort value if specified by the client.
|
||||||
|
BetaAnnotationHealthCheckNodePort = "service.beta.kubernetes.io/healthcheck-nodeport"
|
||||||
|
|
||||||
|
// BetaAnnotationExternalTraffic An annotation that denotes if this Service desires to route
|
||||||
|
// external traffic to local endpoints only. This preserves Source IP and avoids a second hop.
|
||||||
|
BetaAnnotationExternalTraffic = "service.beta.kubernetes.io/external-traffic"
|
||||||
|
)
|
|
@ -0,0 +1,22 @@
|
||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package api contains the latest (or "internal") version of the
|
||||||
|
// Kubernetes API objects. This is the API objects as represented in memory.
|
||||||
|
// The contract presented to clients is located in the versioned packages,
|
||||||
|
// which are sub-directories. The first one is "v1". Those packages
|
||||||
|
// describe how a particular version is serialized to storage/network.
|
||||||
|
package api
|
|
@ -0,0 +1,38 @@
|
||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
// Field path constants that are specific to the internal API
|
||||||
|
// representation.
|
||||||
|
const (
|
||||||
|
NodeUnschedulableField = "spec.unschedulable"
|
||||||
|
ObjectNameField = "metadata.name"
|
||||||
|
PodHostField = "spec.nodeName"
|
||||||
|
PodStatusField = "status.phase"
|
||||||
|
SecretTypeField = "type"
|
||||||
|
|
||||||
|
EventReasonField = "reason"
|
||||||
|
EventSourceField = "source"
|
||||||
|
EventTypeField = "type"
|
||||||
|
EventInvolvedKindField = "involvedObject.kind"
|
||||||
|
EventInvolvedNamespaceField = "involvedObject.namespace"
|
||||||
|
EventInvolvedNameField = "involvedObject.name"
|
||||||
|
EventInvolvedUIDField = "involvedObject.uid"
|
||||||
|
EventInvolvedAPIVersionField = "involvedObject.apiVersion"
|
||||||
|
EventInvolvedResourceVersionField = "involvedObject.resourceVersion"
|
||||||
|
EventInvolvedFieldPathField = "involvedObject.fieldPath"
|
||||||
|
)
|
|
@ -0,0 +1,28 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
import "encoding/json"
|
||||||
|
|
||||||
|
// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations
|
||||||
|
// to prevent anyone from marshaling these internal structs.
|
||||||
|
|
||||||
|
var _ = json.Marshaler(&AvoidPods{})
|
||||||
|
var _ = json.Unmarshaler(&AvoidPods{})
|
||||||
|
|
||||||
|
func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") }
|
||||||
|
func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") }
|
|
@ -0,0 +1,34 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
//TODO: consider making these methods functions, because we don't want helper
|
||||||
|
//functions in the k8s.io/api repo.
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
|
||||||
|
obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind {
|
||||||
|
return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj }
|
|
@ -0,0 +1,135 @@
|
||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/apimachinery/announced"
|
||||||
|
"k8s.io/apimachinery/pkg/apimachinery/registered"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details)
|
||||||
|
var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry)
|
||||||
|
|
||||||
|
// Registry is an instance of an API registry. This is an interim step to start removing the idea of a global
|
||||||
|
// API registry.
|
||||||
|
var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS"))
|
||||||
|
|
||||||
|
// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
|
||||||
|
// NOTE: If you are copying this file to start a new api group, STOP! Copy the
|
||||||
|
// extensions group instead. This Scheme is special and should appear ONLY in
|
||||||
|
// the api group, unless you really know what you're doing.
|
||||||
|
// TODO(lavalamp): make the above error impossible.
|
||||||
|
var Scheme = runtime.NewScheme()
|
||||||
|
|
||||||
|
// Codecs provides access to encoding and decoding for the scheme
|
||||||
|
var Codecs = serializer.NewCodecFactory(Scheme)
|
||||||
|
|
||||||
|
// GroupName is the group name use in this package
|
||||||
|
const GroupName = ""
|
||||||
|
|
||||||
|
// SchemeGroupVersion is group version used to register these objects
|
||||||
|
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
|
||||||
|
|
||||||
|
// Unversioned is group version for unversioned API objects
|
||||||
|
// TODO: this should be v1 probably
|
||||||
|
var Unversioned = schema.GroupVersion{Group: "", Version: "v1"}
|
||||||
|
|
||||||
|
// ParameterCodec handles versioning of objects that are converted to query parameters.
|
||||||
|
var ParameterCodec = runtime.NewParameterCodec(Scheme)
|
||||||
|
|
||||||
|
// Kind takes an unqualified kind and returns a Group qualified GroupKind
|
||||||
|
func Kind(kind string) schema.GroupKind {
|
||||||
|
return SchemeGroupVersion.WithKind(kind).GroupKind()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resource takes an unqualified resource and returns a Group qualified GroupResource
|
||||||
|
func Resource(resource string) schema.GroupResource {
|
||||||
|
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
	// SchemeBuilder collects the registration functions for this package
	// (currently just addKnownTypes).
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme applies all of SchemeBuilder's stored functions to a scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
|
||||||
|
|
||||||
|
// addKnownTypes registers the internal-version core API types with the given
// scheme, and registers the metav1 discovery/status types as unversioned under
// the Unversioned group version. It is wired into SchemeBuilder above.
func addKnownTypes(scheme *runtime.Scheme) error {
	// TypeMeta is registered as an ignored conversion type: it carries only
	// apiVersion/kind and needs no field conversion between versions.
	if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
		return err
	}
	scheme.AddKnownTypes(SchemeGroupVersion,
		&Pod{},
		&PodList{},
		&PodStatusResult{},
		&PodTemplate{},
		&PodTemplateList{},
		&ReplicationControllerList{},
		&ReplicationController{},
		&ServiceList{},
		&Service{},
		&ServiceProxyOptions{},
		&NodeList{},
		&Node{},
		&NodeProxyOptions{},
		&Endpoints{},
		&EndpointsList{},
		&Binding{},
		&Event{},
		&EventList{},
		&List{},
		&LimitRange{},
		&LimitRangeList{},
		&ResourceQuota{},
		&ResourceQuotaList{},
		&Namespace{},
		&NamespaceList{},
		&ServiceAccount{},
		&ServiceAccountList{},
		&Secret{},
		&SecretList{},
		&PersistentVolume{},
		&PersistentVolumeList{},
		&PersistentVolumeClaim{},
		&PersistentVolumeClaimList{},
		&PodAttachOptions{},
		&PodLogOptions{},
		&PodExecOptions{},
		&PodPortForwardOptions{},
		&PodProxyOptions{},
		&ComponentStatus{},
		&ComponentStatusList{},
		&SerializedReference{},
		&RangeAllocation{},
		&ConfigMap{},
		&ConfigMapList{},
	)

	// Register Unversioned types under their own special group.
	scheme.AddUnversionedTypes(Unversioned,
		&metav1.Status{},
		&metav1.APIVersions{},
		&metav1.APIGroupList{},
		&metav1.APIGroup{},
		&metav1.APIResourceList{},
	)
	return nil
}
|
|
@ -0,0 +1,55 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (self ResourceName) String() string {
|
||||||
|
return string(self)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns the CPU limit if specified.
|
||||||
|
func (self *ResourceList) Cpu() *resource.Quantity {
|
||||||
|
if val, ok := (*self)[ResourceCPU]; ok {
|
||||||
|
return &val
|
||||||
|
}
|
||||||
|
return &resource.Quantity{Format: resource.DecimalSI}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns the Memory limit if specified.
|
||||||
|
func (self *ResourceList) Memory() *resource.Quantity {
|
||||||
|
if val, ok := (*self)[ResourceMemory]; ok {
|
||||||
|
return &val
|
||||||
|
}
|
||||||
|
return &resource.Quantity{Format: resource.BinarySI}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ResourceList) Pods() *resource.Quantity {
|
||||||
|
if val, ok := (*self)[ResourcePods]; ok {
|
||||||
|
return &val
|
||||||
|
}
|
||||||
|
return &resource.Quantity{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ResourceList) NvidiaGPU() *resource.Quantity {
|
||||||
|
if val, ok := (*self)[ResourceNvidiaGPU]; ok {
|
||||||
|
return &val
|
||||||
|
}
|
||||||
|
return &resource.Quantity{}
|
||||||
|
}
|
|
@ -0,0 +1,36 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
//TODO: consider making these methods functions, because we don't want helper
|
||||||
|
//functions in the k8s.io/api repo.
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect,
|
||||||
|
// if the two taints have same key:effect, regard as they match.
|
||||||
|
func (t *Taint) MatchTaint(taintToMatch Taint) bool {
|
||||||
|
return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
|
||||||
|
}
|
||||||
|
|
||||||
|
// taint.ToString() converts taint struct to string in format key=value:effect or key:effect.
|
||||||
|
func (t *Taint) ToString() string {
|
||||||
|
if len(t.Value) == 0 {
|
||||||
|
return fmt.Sprintf("%v:%v", t.Key, t.Effect)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
|
||||||
|
}
|
|
@ -0,0 +1,30 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
//TODO: consider making these methods functions, because we don't want helper
|
||||||
|
//functions in the k8s.io/api repo.
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>,
|
||||||
|
// if the two tolerations have same <key,effect,operator,value> combination, regard as they match.
|
||||||
|
// TODO: uniqueness check for tolerations in api validations.
|
||||||
|
func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool {
|
||||||
|
return t.Key == tolerationToMatch.Key &&
|
||||||
|
t.Effect == tolerationToMatch.Effect &&
|
||||||
|
t.Operator == tolerationToMatch.Operator &&
|
||||||
|
t.Value == tolerationToMatch.Value
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,51 @@
|
||||||
|
# Bazel build definition for this vendored Go package. The deps reference
# k8s.io/client-go vendored packages, so this appears to be client-go's
# pkg/api library — TODO confirm against the vendor tree.
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "annotation_key_constants.go",
        "conversion.go",
        "defaults.go",
        "doc.go",
        "generate.go",
        "generated.pb.go",
        "meta.go",
        "objectreference.go",
        "register.go",
        "resource.go",
        "taint.go",
        "toleration.go",
        "types.generated.go",
        "types.go",
        "types_swagger_doc_generated.go",
        "zz_generated.conversion.go",
        "zz_generated.deepcopy.go",
        "zz_generated.defaults.go",
    ],
    # "automanaged" suggests this rule is tool-maintained; presumably edits
    # should go through the generator rather than by hand — verify.
    tags = ["automanaged"],
    deps = [
        "//vendor/github.com/gogo/protobuf/proto:go_default_library",
        "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library",
        "//vendor/github.com/ugorji/go/codec:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
        "//vendor/k8s.io/client-go/pkg/api:go_default_library",
        "//vendor/k8s.io/client-go/pkg/apis/extensions:go_default_library",
        "//vendor/k8s.io/client-go/pkg/util:go_default_library",
        "//vendor/k8s.io/client-go/pkg/util/parsers:go_default_library",
    ],
)
|
|
@ -0,0 +1,41 @@
|
||||||
|
reviewers:
|
||||||
|
- thockin
|
||||||
|
- lavalamp
|
||||||
|
- smarterclayton
|
||||||
|
- wojtek-t
|
||||||
|
- deads2k
|
||||||
|
- yujuhong
|
||||||
|
- brendandburns
|
||||||
|
- derekwaynecarr
|
||||||
|
- caesarxuchao
|
||||||
|
- vishh
|
||||||
|
- mikedanese
|
||||||
|
- liggitt
|
||||||
|
- nikhiljindal
|
||||||
|
- bprashanth
|
||||||
|
- gmarek
|
||||||
|
- erictune
|
||||||
|
- davidopp
|
||||||
|
- pmorie
|
||||||
|
- sttts
|
||||||
|
- kargakis
|
||||||
|
- dchen1107
|
||||||
|
- saad-ali
|
||||||
|
- zmerlynn
|
||||||
|
- luxas
|
||||||
|
- janetkuo
|
||||||
|
- justinsb
|
||||||
|
- roberthbailey
|
||||||
|
- ncdc
|
||||||
|
- timstclair
|
||||||
|
- eparis
|
||||||
|
- timothysc
|
||||||
|
- piosz
|
||||||
|
- jsafrane
|
||||||
|
- dims
|
||||||
|
- errordeveloper
|
||||||
|
- madhusudancs
|
||||||
|
- krousey
|
||||||
|
- jayunit100
|
||||||
|
- rootfs
|
||||||
|
- markturansky
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue