deps: github.com/hashicorp/aws-sdk-go-base@v0.4.0 (#22994)

parent cf16b42135
commit 6656104f8f

go.mod (7 changed lines)
@@ -16,7 +16,7 @@ require (
 	github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2
 	github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect
 	github.com/armon/go-radix v1.0.0 // indirect
-	github.com/aws/aws-sdk-go v1.22.0
+	github.com/aws/aws-sdk-go v1.25.3
 	github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect
 	github.com/blang/semver v3.5.1+incompatible
 	github.com/bmatcuk/doublestar v1.1.5
@@ -39,7 +39,6 @@ require (
 	github.com/golang/mock v1.3.1
 	github.com/golang/protobuf v1.3.2
 	github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
-	github.com/google/btree v1.0.0 // indirect
 	github.com/google/go-cmp v0.3.0
 	github.com/google/uuid v1.1.1
 	github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968
@@ -49,7 +48,7 @@ require (
 	github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway v1.8.5 // indirect
-	github.com/hashicorp/aws-sdk-go-base v0.3.0
+	github.com/hashicorp/aws-sdk-go-base v0.4.0
 	github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089
 	github.com/hashicorp/errwrap v1.0.0
 	github.com/hashicorp/go-azure-helpers v0.0.0-20190129193224-166dfd221bb2
@@ -133,3 +132,5 @@ require (
 	gopkg.in/ini.v1 v1.42.0 // indirect
 	gopkg.in/yaml.v2 v2.2.2
 )
+
+go 1.13
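For context only: the commit does not record how the bump was produced, but a change of this shape is typically the result of the standard Go module tooling, roughly along these lines (the exact invocation here is an assumption):

    go get github.com/hashicorp/aws-sdk-go-base@v0.4.0   # also moves aws/aws-sdk-go forward (to v1.25.3 here)
    go mod tidy                                          # can drop no-longer-needed indirect requirements (google/btree here) and add the go directive
    go mod vendor                                        # refreshes the vendor/ tree whose diffs follow below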
go.sum (9 changed lines)

@@ -53,9 +53,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
 github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
 github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
-github.com/aws/aws-sdk-go v1.16.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.22.0 h1:e88V6+dSEyBibUy0ekOydtTfNWzqG3hrtCR8SF6UqqY=
-github.com/aws/aws-sdk-go v1.22.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.3 h1:uM16hIw9BotjZKMZlX05SN2EFtaWfi/NonPKIARiBLQ=
+github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=
 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
@@ -161,8 +160,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
 github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/aws-sdk-go-base v0.3.0 h1:CPWKWCuOwpIFNsy8FUI9IT2QI7mGwgVPc4hrXW9I4L4=
-github.com/hashicorp/aws-sdk-go-base v0.3.0/go.mod h1:ZIWACGGi0N7a4DZbf15yuE1JQORmWLtBcVM6F5SXNFU=
+github.com/hashicorp/aws-sdk-go-base v0.4.0 h1:zH9hNUdsS+2G0zJaU85ul8D59BGnZBaKM+KMNPAHGwk=
+github.com/hashicorp/aws-sdk-go-base v0.4.0/go.mod h1:eRhlz3c4nhqxFZJAahJEFL7gh6Jyj5rQmQc7F9eHFyQ=
 github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089 h1:1eDpXAxTh0iPv+1kc9/gfSI2pxRERDsTk/lNGolwHn8=
 github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
 github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -1,307 +0,0 @@
[entire file deleted: a build-ignored ("+build ignore") generator program, Copyright 2014 Couchbase, Inc., modified by Martin Atkins for package textseg. It fetched GraphemeBreakProperty.txt, WordBreakProperty.txt, and SentenceBreakProperty.txt from the unicode.org UCD auxiliary directory, parsed each entry into unicode.RangeTable values (Range16/Range32 with LatinOffset tracking), and wrote gofmt-formatted Go source for package textseg declaring the per-property range tables together with the _GraphemeRuneType/_WordRuneType/_SentenceRuneType lookups and their String helpers.]
@@ -1,212 +0,0 @@
[entire file deleted: the companion build-ignored test-table generator for package textseg, also Copyright 2014 Couchbase, Inc. It fetched GraphemeBreakTest.txt, WordBreakTest.txt, and SentenceBreakTest.txt, split each case on the ÷ (break) and × (no-break) markers, and wrote gofmt-formatted unicode<Prefix>Tests tables of input bytes and expected output segments.]
@@ -64,7 +64,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op
 	default:
 		maxRetries := aws.IntValue(cfg.MaxRetries)
 		if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
-			maxRetries = 3
+			maxRetries = DefaultRetryerMaxNumRetries
 		}
 		svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
 	}
@@ -1,6 +1,7 @@
 package client

 import (
+	"math"
 	"strconv"
 	"time"

@@ -9,82 +10,142 @@ import (
 )

 // DefaultRetryer implements basic retry logic using exponential backoff for
-// most services. If you want to implement custom retry logic, implement the
-// request.Retryer interface or create a structure type that composes this
-// struct and override the specific methods. For example, to override only
-// the MaxRetries method:
+// most services. If you want to implement custom retry logic, you can implement the
+// request.Retryer interface.
 //
-//    type retryer struct {
-//      client.DefaultRetryer
-//    }
-//
-//    // This implementation always has 100 max retries
-//    func (d retryer) MaxRetries() int { return 100 }
 type DefaultRetryer struct {
+	// Num max Retries is the number of max retries that will be performed.
+	// By default, this is zero.
 	NumMaxRetries int
+
+	// MinRetryDelay is the minimum retry delay after which retry will be performed.
+	// If not set, the value is 0ns.
+	MinRetryDelay time.Duration
+
+	// MinThrottleRetryDelay is the minimum retry delay when throttled.
+	// If not set, the value is 0ns.
+	MinThrottleDelay time.Duration
+
+	// MaxRetryDelay is the maximum retry delay before which retry must be performed.
+	// If not set, the value is 0ns.
+	MaxRetryDelay time.Duration
+
+	// MaxThrottleDelay is the maximum retry delay when throttled.
+	// If not set, the value is 0ns.
+	MaxThrottleDelay time.Duration
 }

+const (
+	// DefaultRetryerMaxNumRetries sets maximum number of retries
+	DefaultRetryerMaxNumRetries = 3
+
+	// DefaultRetryerMinRetryDelay sets minimum retry delay
+	DefaultRetryerMinRetryDelay = 30 * time.Millisecond
+
+	// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
+	DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
+
+	// DefaultRetryerMaxRetryDelay sets maximum retry delay
+	DefaultRetryerMaxRetryDelay = 300 * time.Second
+
+	// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
+	DefaultRetryerMaxThrottleDelay = 300 * time.Second
+)
+
 // MaxRetries returns the number of maximum returns the service will use to make
 // an individual API request.
 func (d DefaultRetryer) MaxRetries() int {
 	return d.NumMaxRetries
 }

+// setRetryerDefaults sets the default values of the retryer if not set
+func (d *DefaultRetryer) setRetryerDefaults() {
+	if d.MinRetryDelay == 0 {
+		d.MinRetryDelay = DefaultRetryerMinRetryDelay
+	}
+	if d.MaxRetryDelay == 0 {
+		d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
+	}
+	if d.MinThrottleDelay == 0 {
+		d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
+	}
+	if d.MaxThrottleDelay == 0 {
+		d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
+	}
+}
+
 // RetryRules returns the delay duration before retrying this request again
 func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
-	// Set the upper limit of delay in retrying at ~five minutes
-	minTime := 30
-	throttle := d.shouldThrottle(r)
-	if throttle {
-		if delay, ok := getRetryDelay(r); ok {
-			return delay
-		}
-
-		minTime = 500
+	// if number of max retries is zero, no retries will be performed.
+	if d.NumMaxRetries == 0 {
+		return 0
+	}
+
+	// Sets default value for retryer members
+	d.setRetryerDefaults()
+
+	// minDelay is the minimum retryer delay
+	minDelay := d.MinRetryDelay
+
+	var initialDelay time.Duration
+
+	isThrottle := r.IsErrorThrottle()
+	if isThrottle {
+		if delay, ok := getRetryAfterDelay(r); ok {
+			initialDelay = delay
+		}
+		minDelay = d.MinThrottleDelay
 	}

 	retryCount := r.RetryCount
-	if throttle && retryCount > 8 {
-		retryCount = 8
-	} else if retryCount > 13 {
-		retryCount = 13
+
+	// maxDelay the maximum retryer delay
+	maxDelay := d.MaxRetryDelay
+
+	if isThrottle {
+		maxDelay = d.MaxThrottleDelay
 	}

-	delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
-	return time.Duration(delay) * time.Millisecond
+	var delay time.Duration
+
+	// Logic to cap the retry count based on the minDelay provided
+	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
+	if actualRetryCount < 63-retryCount {
+		delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
+		if delay > maxDelay {
+			delay = getJitterDelay(maxDelay / 2)
+		}
+	} else {
+		delay = getJitterDelay(maxDelay / 2)
+	}
+	return delay + initialDelay
+}
+
+// getJitterDelay returns a jittered delay for retry
+func getJitterDelay(duration time.Duration) time.Duration {
+	return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
 }

 // ShouldRetry returns true if the request should be retried.
 func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+
+	// ShouldRetry returns false if number of max retries is 0.
+	if d.NumMaxRetries == 0 {
+		return false
+	}
+
 	// If one of the other handlers already set the retry state
 	// we don't want to override it based on the service's state
 	if r.Retryable != nil {
 		return *r.Retryable
 	}
-
-	if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
-		return true
-	}
-	return r.IsErrorRetryable() || d.shouldThrottle(r)
-}
-
-// ShouldThrottle returns true if the request should be throttled.
-func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
-	switch r.HTTPResponse.StatusCode {
-	case 429:
-	case 502:
-	case 503:
-	case 504:
-	default:
-		return r.IsErrorThrottle()
-	}
-
-	return true
+	return r.IsErrorRetryable() || r.IsErrorThrottle()
 }

 // This will look in the Retry-After header, RFC 7231, for how long
 // it will wait before attempting another request
-func getRetryDelay(r *request.Request) (time.Duration, bool) {
+func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
 	if !canUseRetryAfterHeader(r) {
 		return 0, false
 	}
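For orientation, not part of the commit: the hunk above (the SDK's default retryer) adds tunable delay fields, and any delay left at zero falls back to the new DefaultRetryer* constants via setRetryerDefaults. A minimal caller-side sketch using the standard aws-sdk-go session API, with illustrative values:

    package main

    import (
    	"time"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/client"
    	"github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
    	// Sketch: configure the expanded DefaultRetryer; unset delays take the
    	// DefaultRetryer* defaults at request time.
    	retryer := client.DefaultRetryer{
    		NumMaxRetries:    client.DefaultRetryerMaxNumRetries, // 3
    		MinRetryDelay:    50 * time.Millisecond,              // illustrative value
    		MinThrottleDelay: 500 * time.Millisecond,             // illustrative value
    	}

    	sess := session.Must(session.NewSession(&aws.Config{Retryer: retryer}))
    	_ = sess
    }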
@@ -0,0 +1,28 @@
+package client
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// NoOpRetryer provides a retryer that performs no retries.
+// It should be used when we do not want retries to be performed.
+type NoOpRetryer struct{}
+
+// MaxRetries returns the number of maximum returns the service will use to make
+// an individual API; For NoOpRetryer the MaxRetries will always be zero.
+func (d NoOpRetryer) MaxRetries() int {
+	return 0
+}
+
+// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
+func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool {
+	return false
+}
+
+// RetryRules returns the delay duration before retrying this request again;
+// since NoOpRetryer does not retry, RetryRules always returns 0.
+func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration {
+	return 0
+}
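A brief usage sketch for this new NoOpRetryer (illustrative, not taken from the commit): it can be assigned to an aws.Config wherever retries should be disabled outright.

    package main

    import (
    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/client"
    )

    func main() {
    	// Sketch: NoOpRetryer satisfies the retryer interface expected by Config.Retryer.
    	cfg := aws.NewConfig()
    	cfg.Retryer = client.NoOpRetryer{}
    	_ = cfg
    }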
@@ -20,7 +20,7 @@ type RequestRetryer interface{}
 // A Config provides service configuration for service clients. By default,
 // all clients will use the defaults.DefaultConfig structure.
 //
-//     // Create Session with MaxRetry configuration to be shared by multiple
+//     // Create Session with MaxRetries configuration to be shared by multiple
 //     // service clients.
 //     sess := session.Must(session.NewSession(&aws.Config{
 //         MaxRetries: aws.Int(3),
@@ -251,7 +251,7 @@ type Config struct {
 // NewConfig returns a new Config pointer that can be chained with builder
 // methods to set multiple configuration values inline without using pointers.
 //
-//     // Create Session with MaxRetry configuration to be shared by multiple
+//     // Create Session with MaxRetries configuration to be shared by multiple
 //     // service clients.
 //     sess := session.Must(session.NewSession(aws.NewConfig().
 //         WithMaxRetries(3),
@@ -179,6 +179,242 @@ func IntValueMap(src map[string]*int) map[string]int {
 	return dst
 }

+// Uint returns a pointer to the uint value passed in.
+func Uint(v uint) *uint {
+	return &v
+}
+
+// UintValue returns the value of the uint pointer passed in or
+// 0 if the pointer is nil.
+func UintValue(v *uint) uint {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// UintSlice converts a slice of uint values uinto a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+	dst := make([]*uint, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// UintValueSlice converts a slice of uint pointers uinto a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+	dst := make([]uint, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// UintMap converts a string map of uint values uinto a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+	dst := make(map[string]*uint)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// UintValueMap converts a string map of uint pointers uinto a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+	dst := make(map[string]uint)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
[... the remainder of this hunk adds the same six helpers (X, XValue, XSlice, XValueSlice, XMap, XValueMap) for int8, int16, and int32 ...]

 // Int64 returns a pointer to the int64 value passed in.
 func Int64(v int64) *int64 {
 	return &v
@@ -238,6 +474,301 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 {
 	return dst
 }

[... this hunk adds the same six helpers (X, XValue, XSlice, XValueSlice, XMap, XValueMap) for uint8, uint16, uint32, uint64, and float32 ...]

 // Float64 returns a pointer to the float64 value passed in.
 func Float64(v float64) *float64 {
 	return &v
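A small aside, not from the commit: the new pointer helpers follow the long-standing aws.Int64 / aws.String pattern, for example:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    )

    func main() {
    	p := aws.Uint32(42)               // *uint32 pointing at 42
    	fmt.Println(aws.Uint32Value(p))   // 42
    	fmt.Println(aws.Uint32Value(nil)) // 0 when the pointer is nil
    }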
@@ -159,9 +159,9 @@ func handleSendError(r *request.Request, err error) {
 			Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
 		}
 	}
-	// Catch all other request errors.
+	// Catch all request errors, and let the default retrier determine
+	// if the error is retryable.
 	r.Error = awserr.New("RequestError", "send request failed", err)
-	r.Retryable = aws.Bool(true) // network errors are retryable

 	// Override the error with a context canceled error, if that was canceled.
 	ctx := r.Context()
@@ -184,7 +184,9 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH

 // AfterRetryHandler performs final checks to determine if the request should
 // be retried and how long to delay.
-var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
+var AfterRetryHandler = request.NamedHandler{
+	Name: "core.AfterRetryHandler",
+	Fn: func(r *request.Request) {
 	// If one of the other handlers already set the retry state
 	// we don't want to override it based on the service's state
 	if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
@@ -214,7 +216,7 @@ var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn:
 		r.RetryCount++
 		r.Error = nil
 	}
-}}
+	}}

 // ValidateEndpointHandler is a request handler to validate a request had the
 // appropriate Region and Endpoint set. Will set r.Error if the endpoint or
@@ -98,8 +98,8 @@ func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint strin
 	return p
 }

-// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
-// from an arbitrary endpoint concurrently. The client will request the
+// NewCredentialsClient returns a pointer to a new Credentials object
+// wrapping the endpoint credentials Provider.
 func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
 	return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
 }
7	vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go generated vendored
@@ -76,12 +76,15 @@ func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
 		// uses unix time in nanoseconds to uniquely identify sessions.
 		sessionName = strconv.FormatInt(now().UnixNano(), 10)
 	}
-	resp, err := p.client.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+	req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
 		RoleArn:          &p.roleARN,
 		RoleSessionName:  &sessionName,
 		WebIdentityToken: aws.String(string(b)),
 	})
-	if err != nil {
+	// InvalidIdentityToken error is a temporary error that can occur
+	// when assuming an Role with a JWT web identity token.
+	req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
+	if err := req.Send(); err != nil {
 		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
 	}
 
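For context, a hedged sketch of how this provider is typically wired up by SDK users. The constructor name and its argument order are assumptions based on the stscreds package, and the ARN, session name, and token path are purely illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// Assumed helper: builds credentials backed by the WebIdentityRoleProvider
	// whose Retrieve method is patched in the hunk above.
	creds := stscreds.NewWebIdentityCredentials(sess,
		"arn:aws:iam::123456789012:role/example-role",              // hypothetical role ARN
		"example-session",                                          // hypothetical session name
		"/var/run/secrets/eks.amazonaws.com/serviceaccount/token") // hypothetical token path

	cfg := aws.NewConfig().WithCredentials(creds)
	fmt.Println(cfg.Credentials != nil)
}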
@@ -16,25 +16,26 @@ var (
 
 type metricChan struct {
 	ch     chan metric
-	paused int64
+	paused *int64
 }
 
 func newMetricChan(size int) metricChan {
 	return metricChan{
 		ch: make(chan metric, size),
+		paused: new(int64),
 	}
 }
 
 func (ch *metricChan) Pause() {
-	atomic.StoreInt64(&ch.paused, pausedEnum)
+	atomic.StoreInt64(ch.paused, pausedEnum)
 }
 
 func (ch *metricChan) Continue() {
-	atomic.StoreInt64(&ch.paused, runningEnum)
+	atomic.StoreInt64(ch.paused, runningEnum)
 }
 
 func (ch *metricChan) IsPaused() bool {
-	v := atomic.LoadInt64(&ch.paused)
+	v := atomic.LoadInt64(ch.paused)
 	return v == pausedEnum
 }
 
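The switch from an int64 field to a *int64 matters because metricChan values are copied; a pointer keeps every copy operating on the same flag. A self-contained illustration of the pattern (simplified, not the vendored type itself):

package main

import (
	"fmt"
	"sync/atomic"
)

// pausable mirrors the idea above: the flag lives behind a pointer so that
// value copies of the struct still share one atomic counter.
type pausable struct {
	paused *int64
}

func newPausable() pausable { return pausable{paused: new(int64)} }

func (p pausable) Pause()         { atomic.StoreInt64(p.paused, 1) }
func (p pausable) Continue()      { atomic.StoreInt64(p.paused, 0) }
func (p pausable) IsPaused() bool { return atomic.LoadInt64(p.paused) == 1 }

func main() {
	p := newPausable()
	q := p // a copy still sees the same flag
	p.Pause()
	fmt.Println(q.IsPaused()) // true
}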
@@ -153,6 +153,7 @@ type EC2IAMInfo struct {
 // an instance identity document
 type EC2InstanceIdentityDocument struct {
 	DevpayProductCodes      []string `json:"devpayProductCodes"`
+	MarketplaceProductCodes []string `json:"marketplaceProductCodes"`
 	AvailabilityZone        string   `json:"availabilityZone"`
 	PrivateIP               string   `json:"privateIp"`
 	Version                 string   `json:"version"`
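The new MarketplaceProductCodes field is populated when the instance identity document is fetched. A short sketch of reading the document with the ec2metadata client from this vendor tree (only runs against real or mocked instance metadata):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	doc, err := svc.GetInstanceIdentityDocument()
	if err != nil {
		fmt.Println("not running on EC2:", err)
		return
	}
	fmt.Println(doc.Region, doc.MarketplaceProductCodes)
}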
@@ -123,7 +123,7 @@ func unmarshalHandler(r *request.Request) {
 	defer r.HTTPResponse.Body.Close()
 	b := &bytes.Buffer{}
 	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
-		r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata respose", err)
+		r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err)
 		return
 	}
 
@@ -136,7 +136,7 @@ func unmarshalError(r *request.Request) {
 	defer r.HTTPResponse.Body.Close()
 	b := &bytes.Buffer{}
 	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
-		r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error respose", err)
+		r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err)
 		return
 	}
 
File diff suppressed because it is too large
@@ -23,7 +23,7 @@ type Handlers struct {
 	Complete         HandlerList
 }
 
-// Copy returns of this handler's lists.
+// Copy returns a copy of this handler's lists.
 func (h *Handlers) Copy() Handlers {
 	return Handlers{
 		Validate:         h.Validate.copy(),
@@ -42,7 +42,7 @@ func (h *Handlers) Copy() Handlers {
 	}
 }
 
-// Clear removes callback functions for all handlers
+// Clear removes callback functions for all handlers.
 func (h *Handlers) Clear() {
 	h.Validate.Clear()
 	h.Build.Clear()
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"fmt"
 	"io"
-	"net"
 	"net/http"
 	"net/url"
 	"reflect"
@@ -65,6 +64,15 @@ type Request struct {
 	LastSignedAt           time.Time
 	DisableFollowRedirects bool
 
+	// Additional API error codes that should be retried. IsErrorRetryable
+	// will consider these codes in addition to its built in cases.
+	RetryErrorCodes []string
+
+	// Additional API error codes that should be retried with throttle backoff
+	// delay. IsErrorThrottle will consider these codes in addition to its
+	// built in cases.
+	ThrottleErrorCodes []string
+
 	// A value greater than 0 instructs the request to be signed as Presigned URL
 	// You should not set this field directly. Instead use Request's
 	// Presign or PresignRequest methods.
@@ -498,9 +506,7 @@ func (r *Request) Send() error {
 
 		if err := r.sendRequest(); err == nil {
 			return nil
-		} else if !shouldRetryError(r.Error) {
-			return err
-		} else {
+		}
 		r.Handlers.Retry.Run(r)
 		r.Handlers.AfterRetry.Run(r)
 
@@ -512,8 +518,6 @@ func (r *Request) Send() error {
 			r.Error = err
 			return err
 		}
-			continue
-		}
 	}
 }
 
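The new RetryErrorCodes and ThrottleErrorCodes fields let callers opt specific API error codes into the retry logic on a per-request basis. A hedged sketch of how a caller could use them; the service client, bucket, key, and error code are illustrative assumptions, not part of this diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	req, out := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		Key:    aws.String("example-key"),    // hypothetical key
	})

	// Ask the retrier to also retry this service-specific error code.
	req.RetryErrorCodes = append(req.RetryErrorCodes, "SlowDown")

	if err := req.Send(); err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer out.Body.Close()
	fmt.Println("object fetched")
}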
@@ -596,51 +600,6 @@ func AddToUserAgent(r *Request, s string) {
 	r.HTTPRequest.Header.Set("User-Agent", s)
 }
 
-type temporary interface {
-	Temporary() bool
-}
-
-func shouldRetryError(origErr error) bool {
-	switch err := origErr.(type) {
-	case awserr.Error:
-		if err.Code() == CanceledErrorCode {
-			return false
-		}
-		return shouldRetryError(err.OrigErr())
-	case *url.Error:
-		if strings.Contains(err.Error(), "connection refused") {
-			// Refused connections should be retried as the service may not yet
-			// be running on the port. Go TCP dial considers refused
-			// connections as not temporary.
-			return true
-		}
-		// *url.Error only implements Temporary after golang 1.6 but since
-		// url.Error only wraps the error:
-		return shouldRetryError(err.Err)
-	case temporary:
-		if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
-			return true
-		}
-		// If the error is temporary, we want to allow continuation of the
-		// retry process
-		return err.Temporary() || isErrConnectionReset(origErr)
-	case nil:
-		// `awserr.Error.OrigErr()` can be nil, meaning there was an error but
-		// because we don't know the cause, it is marked as retryable. See
-		// TestRequest4xxUnretryable for an example.
-		return true
-	default:
-		switch err.Error() {
-		case "net/http: request canceled",
-			"net/http: request canceled while waiting for connection":
-			// known 1.5 error case when an http request is cancelled
-			return false
-		}
-		// here we don't know the error; so we allow a retry.
-		return true
-	}
-}
-
 // SanitizeHostForHeader removes default port from host and updates request.Host
 func SanitizeHostForHeader(r *http.Request) {
 	host := getHost(r)
@@ -1,23 +1,41 @@
 package request
 
 import (
+	"net"
+	"net/url"
+	"strings"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 )
 
-// Retryer is an interface to control retry logic for a given service.
-// The default implementation used by most services is the client.DefaultRetryer
-// structure, which contains basic retry logic using exponential backoff.
+// Retryer provides the interface drive the SDK's request retry behavior. The
+// Retryer implementation is responsible for implementing exponential backoff,
+// and determine if a request API error should be retried.
+//
+// client.DefaultRetryer is the SDK's default implementation of the Retryer. It
+// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle
+// methods to determine if the request is retried.
 type Retryer interface {
+	// RetryRules return the retry delay that should be used by the SDK before
+	// making another request attempt for the failed request.
 	RetryRules(*Request) time.Duration
+
+	// ShouldRetry returns if the failed request is retryable.
+	//
+	// Implementations may consider request attempt count when determining if a
+	// request is retryable, but the SDK will use MaxRetries to limit the
+	// number of attempts a request are made.
 	ShouldRetry(*Request) bool
+
+	// MaxRetries is the number of times a request may be retried before
+	// failing.
 	MaxRetries() int
 }
 
-// WithRetryer sets a config Retryer value to the given Config returning it
-// for chaining.
+// WithRetryer sets a Retryer value to the given Config returning the Config
+// value for chaining.
 func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
 	cfg.Retryer = retryer
 	return cfg
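A small sketch of satisfying the Retryer interface above by embedding the SDK's default implementation and overriding one method, then attaching it with WithRetryer. The custom type name and delay value are assumptions for illustration:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
)

// flatDelayRetryer keeps DefaultRetryer's ShouldRetry/MaxRetries behavior but
// replaces the backoff with a constant delay.
type flatDelayRetryer struct {
	client.DefaultRetryer
}

func (flatDelayRetryer) RetryRules(*request.Request) time.Duration {
	return 200 * time.Millisecond
}

func main() {
	r := flatDelayRetryer{client.DefaultRetryer{NumMaxRetries: 5}}
	cfg := request.WithRetryer(aws.NewConfig(), r)
	fmt.Println(cfg.Retryer != nil) // true
}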
@@ -76,10 +94,6 @@ var validParentCodes = map[string]struct{}{
 	ErrCodeRead: {},
 }
 
-type temporaryError interface {
-	Temporary() bool
-}
-
 func isNestedErrorRetryable(parentErr awserr.Error) bool {
 	if parentErr == nil {
 		return false
@@ -98,7 +112,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool {
 		return isCodeRetryable(aerr.Code())
 	}
 
-	if t, ok := err.(temporaryError); ok {
+	if t, ok := err.(temporary); ok {
 		return t.Temporary() || isErrConnectionReset(err)
 	}
 
|
||||||
// IsErrorRetryable returns whether the error is retryable, based on its Code.
|
// IsErrorRetryable returns whether the error is retryable, based on its Code.
|
||||||
// Returns false if error is nil.
|
// Returns false if error is nil.
|
||||||
func IsErrorRetryable(err error) bool {
|
func IsErrorRetryable(err error) bool {
|
||||||
if err != nil {
|
if err == nil {
|
||||||
if aerr, ok := err.(awserr.Error); ok {
|
|
||||||
return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
return false
|
||||||
|
}
|
||||||
|
return shouldRetryError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
type temporary interface {
|
||||||
|
Temporary() bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func shouldRetryError(origErr error) bool {
|
||||||
|
switch err := origErr.(type) {
|
||||||
|
case awserr.Error:
|
||||||
|
if err.Code() == CanceledErrorCode {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if isNestedErrorRetryable(err) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
origErr := err.OrigErr()
|
||||||
|
var shouldRetry bool
|
||||||
|
if origErr != nil {
|
||||||
|
shouldRetry := shouldRetryError(origErr)
|
||||||
|
if err.Code() == "RequestError" && !shouldRetry {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if isCodeRetryable(err.Code()) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return shouldRetry
|
||||||
|
|
||||||
|
case *url.Error:
|
||||||
|
if strings.Contains(err.Error(), "connection refused") {
|
||||||
|
// Refused connections should be retried as the service may not yet
|
||||||
|
// be running on the port. Go TCP dial considers refused
|
||||||
|
// connections as not temporary.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// *url.Error only implements Temporary after golang 1.6 but since
|
||||||
|
// url.Error only wraps the error:
|
||||||
|
return shouldRetryError(err.Err)
|
||||||
|
|
||||||
|
case temporary:
|
||||||
|
if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// If the error is temporary, we want to allow continuation of the
|
||||||
|
// retry process
|
||||||
|
return err.Temporary() || isErrConnectionReset(origErr)
|
||||||
|
|
||||||
|
case nil:
|
||||||
|
// `awserr.Error.OrigErr()` can be nil, meaning there was an error but
|
||||||
|
// because we don't know the cause, it is marked as retryable. See
|
||||||
|
// TestRequest4xxUnretryable for an example.
|
||||||
|
return true
|
||||||
|
|
||||||
|
default:
|
||||||
|
switch err.Error() {
|
||||||
|
case "net/http: request canceled",
|
||||||
|
"net/http: request canceled while waiting for connection":
|
||||||
|
// known 1.5 error case when an http request is cancelled
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// here we don't know the error; so we allow a retry.
|
||||||
|
return true
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsErrorThrottle returns whether the error is to be throttled based on its code.
|
// IsErrorThrottle returns whether the error is to be throttled based on its code.
|
||||||
// Returns false if error is nil.
|
// Returns false if error is nil.
|
||||||
func IsErrorThrottle(err error) bool {
|
func IsErrorThrottle(err error) bool {
|
||||||
if err != nil {
|
if aerr, ok := err.(awserr.Error); ok && aerr != nil {
|
||||||
if aerr, ok := err.(awserr.Error); ok {
|
|
||||||
return isCodeThrottle(aerr.Code())
|
return isCodeThrottle(aerr.Code())
|
||||||
}
|
}
|
||||||
}
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
|
// IsErrorExpiredCreds returns whether the error code is a credential expiry
|
||||||
// Returns false if error is nil.
|
// error. Returns false if error is nil.
|
||||||
func IsErrorExpiredCreds(err error) bool {
|
func IsErrorExpiredCreds(err error) bool {
|
||||||
if err != nil {
|
if aerr, ok := err.(awserr.Error); ok && aerr != nil {
|
||||||
if aerr, ok := err.(awserr.Error); ok {
|
|
||||||
return isCodeExpiredCreds(aerr.Code())
|
return isCodeExpiredCreds(aerr.Code())
|
||||||
}
|
}
|
||||||
}
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -143,17 +215,58 @@ func IsErrorExpiredCreds(err error) bool {
 //
 // Alias for the utility function IsErrorRetryable
 func (r *Request) IsErrorRetryable() bool {
+	if isErrCode(r.Error, r.RetryErrorCodes) {
+		return true
+	}
+
+	// HTTP response status code 501 should not be retried.
+	// 501 represents Not Implemented which means the request method is not
+	// supported by the server and cannot be handled.
+	if r.HTTPResponse != nil {
+		// HTTP response status code 500 represents internal server error and
+		// should be retried without any throttle.
+		if r.HTTPResponse.StatusCode == 500 {
+			return true
+		}
+	}
 	return IsErrorRetryable(r.Error)
 }
 
-// IsErrorThrottle returns whether the error is to be throttled based on its code.
-// Returns false if the request has no Error set
+// IsErrorThrottle returns whether the error is to be throttled based on its
+// code. Returns false if the request has no Error set.
 //
 // Alias for the utility function IsErrorThrottle
 func (r *Request) IsErrorThrottle() bool {
+	if isErrCode(r.Error, r.ThrottleErrorCodes) {
+		return true
+	}
+
+	if r.HTTPResponse != nil {
+		switch r.HTTPResponse.StatusCode {
+		case
+			429, // error caused due to too many requests
+			502, // Bad Gateway error should be throttled
+			503, // caused when service is unavailable
+			504: // error occurred due to gateway timeout
+			return true
+		}
+	}
+
 	return IsErrorThrottle(r.Error)
 }
 
+func isErrCode(err error, codes []string) bool {
+	if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+		for _, code := range codes {
+			if code == aerr.Code() {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
 // IsErrorExpired returns whether the error code is a credential expiry error.
 // Returns false if the request has no Error set.
 //
@@ -99,10 +99,10 @@ type envConfig struct {
 	CustomCABundle string
 
 	csmEnabled  string
-	CSMEnabled  bool
+	CSMEnabled  *bool
 	CSMPort     string
-	CSMClientID string
 	CSMHost     string
+	CSMClientID string
 
 	// Enables endpoint discovery via environment variables.
 	//
@@ -230,7 +230,11 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
 	setFromEnvVal(&cfg.CSMHost, csmHostEnvKey)
 	setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
 	setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
-	cfg.CSMEnabled = len(cfg.csmEnabled) > 0
+
+	if len(cfg.csmEnabled) != 0 {
+		v, _ := strconv.ParseBool(cfg.csmEnabled)
+		cfg.CSMEnabled = &v
+	}
 
 	regionKeys := regionEnvKeys
 	profileKeys := profileEnvKeys
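Because the flag is now parsed with strconv.ParseBool rather than a plain length check, only values ParseBool accepts turn client-side monitoring on. A tiny, self-contained illustration of the parsing behavior this relies on:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, v := range []string{"true", "1", "false", "yes"} {
		b, err := strconv.ParseBool(v)
		fmt.Printf("%q -> %v (err: %v)\n", v, b, err)
	}
	// "true" and "1" parse as true, "false" as false; "yes" fails to parse,
	// which the change above effectively treats as false (the error is ignored).
}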
@@ -104,9 +104,13 @@ func New(cfgs ...*aws.Config) *Session {
 		}
 
 		s := deprecatedNewSession(cfgs...)
-		if envCfg.CSMEnabled {
-			err := enableCSM(&s.Handlers, envCfg.CSMClientID,
-				envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger)
+
+		if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil {
+			if l := s.Config.Logger; l != nil {
+				l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
+			}
+		} else if csmCfg.Enabled {
+			err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
 			if err != nil {
 				err = fmt.Errorf("failed to enable CSM, %v", err)
 				s.Config.Logger.Log("ERROR:", err.Error())
@@ -132,7 +136,7 @@ func New(cfgs ...*aws.Config) *Session {
 // to be built with retrieving credentials with AssumeRole set in the config.
 //
 // See the NewSessionWithOptions func for information on how to override or
-// control through code how the Session will be created. Such as specifying the
+// control through code how the Session will be created, such as specifying the
 // config profile, and controlling if shared config is enabled or not.
 func NewSession(cfgs ...*aws.Config) (*Session, error) {
 	opts := Options{}
@@ -347,15 +351,12 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
 	return s
 }
 
-func enableCSM(handlers *request.Handlers,
-	clientID, host, port string,
-	logger aws.Logger,
-) error {
+func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error {
 	if logger != nil {
 		logger.Log("Enabling CSM")
 	}
 
-	r, err := csm.Start(clientID, csm.AddressWithDefaults(host, port))
+	r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port))
 	if err != nil {
 		return err
 	}
@@ -395,7 +396,13 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
 	// Load additional config from file(s)
 	sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
 	if err != nil {
-		if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+		if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) {
+			// Special case where the user has not explicitly specified an AWS_PROFILE,
+			// or session.Options.profile, shared config is not enabled, and the
+			// environment has credentials, allow the shared config file to fail to
+			// load since the user has already provided credentials, and nothing else
+			// is required to be read file. Github(aws/aws-sdk-go#2455)
+		} else if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
 			return nil, err
 		}
 	}
@@ -410,9 +417,13 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
 	}
 
 	initHandlers(s)
-	if envCfg.CSMEnabled {
-		err := enableCSM(&s.Handlers, envCfg.CSMClientID,
-			envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger)
+
+	if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil {
+		if l := s.Config.Logger; l != nil {
+			l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
+		}
+	} else if csmCfg.Enabled {
+		err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
 		if err != nil {
 			return nil, err
 		}
@@ -428,6 +439,46 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
 	return s, nil
 }
 
+type csmConfig struct {
+	Enabled  bool
+	Host     string
+	Port     string
+	ClientID string
+}
+
+var csmProfileName = "aws_csm"
+
+func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
+	if envCfg.CSMEnabled != nil {
+		if *envCfg.CSMEnabled {
+			return csmConfig{
+				Enabled:  true,
+				ClientID: envCfg.CSMClientID,
+				Host:     envCfg.CSMHost,
+				Port:     envCfg.CSMPort,
+			}, nil
+		}
+		return csmConfig{}, nil
+	}
+
+	sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false)
+	if err != nil {
+		if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+			return csmConfig{}, err
+		}
+	}
+	if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true {
+		return csmConfig{
+			Enabled:  true,
+			ClientID: sharedCfg.CSMClientID,
+			Host:     sharedCfg.CSMHost,
+			Port:     sharedCfg.CSMPort,
+		}, nil
+	}
+
+	return csmConfig{}, nil
+}
+
 func loadCustomCABundle(s *Session, bundle io.Reader) error {
 	var t *http.Transport
 	switch v := s.Config.HTTPClient.Transport.(type) {
@@ -520,7 +571,7 @@ func initHandlers(s *Session) {
 	}
 }
 
-// Copy creates and returns a copy of the current Session, coping the config
+// Copy creates and returns a copy of the current Session, copying the config
// and handlers. If any additional configs are provided they will be merged
 // on top of the Session's copied config.
 //
@@ -22,6 +22,12 @@ const (
 	mfaSerialKey       = `mfa_serial`        // optional
 	roleSessionNameKey = `role_session_name` // optional
 
+	// CSM options
+	csmEnabledKey  = `csm_enabled`
+	csmHostKey     = `csm_host`
+	csmPortKey     = `csm_port`
+	csmClientIDKey = `csm_client_id`
+
 	// Additional Config fields
 	regionKey = `region`
@@ -76,6 +82,12 @@ type sharedConfig struct {
 	//
 	//	endpoint_discovery_enabled = true
 	EnableEndpointDiscovery *bool
+
+	// CSM Options
+	CSMEnabled  *bool
+	CSMHost     string
+	CSMPort     string
+	CSMClientID string
 }
 
 type sharedConfigFile struct {
@@ -251,10 +263,13 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
 	}
 
 	// Endpoint discovery
-	if section.Has(enableEndpointDiscoveryKey) {
-		v := section.Bool(enableEndpointDiscoveryKey)
-		cfg.EnableEndpointDiscovery = &v
-	}
+	updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
+
+	// CSM options
+	updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey)
+	updateString(&cfg.CSMHost, section, csmHostKey)
+	updateString(&cfg.CSMPort, section, csmPortKey)
+	updateString(&cfg.CSMClientID, section, csmClientIDKey)
 
 	return nil
 }
@@ -348,6 +363,16 @@ func updateString(dst *string, section ini.Section, key string) {
 	*dst = section.String(key)
 }
 
+// updateBoolPtr will only update the dst with the value in the section key,
+// key is present in the section.
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = new(bool)
+	**dst = section.Bool(key)
+}
+
 // SharedConfigLoadError is an error for the shared config file failed to load.
 type SharedConfigLoadError struct {
 	Filename string
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.22.0"
+const SDKVersion = "1.25.3"
@@ -0,0 +1,12 @@
+package sdkio
+
+const (
+	// Byte is 8 bits
+	Byte int64 = 1
+	// KibiByte (KiB) is 1024 Bytes
+	KibiByte = Byte * 1024
+	// MebiByte (MiB) is 1024 KiB
+	MebiByte = KibiByte * 1024
+	// GibiByte (GiB) is 1024 MiB
+	GibiByte = MebiByte * 1024
+)
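These constants are plain powers of 1024. Since the sdkio package is internal to the SDK, an application would define the same thing locally; a minimal equivalent showing the arithmetic:

package main

import "fmt"

const (
	byteUnit int64 = 1
	kibiByte       = byteUnit * 1024
	mebiByte       = kibiByte * 1024
)

func main() {
	partSize := 5 * mebiByte
	fmt.Println(partSize) // 5242880, e.g. a 5 MiB multipart upload part size
}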
@@ -0,0 +1,15 @@
+// +build go1.10
+
+package sdkmath
+
+import "math"
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	return math.Round(x)
+}
@@ -0,0 +1,56 @@
+// +build !go1.10
+
+package sdkmath
+
+import "math"
+
+// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
+// Go version prior to Go 1.10.
+const (
+	uvone    = 0x3FF0000000000000
+	mask     = 0x7FF
+	shift    = 64 - 11 - 1
+	bias     = 1023
+	signMask = 1 << 63
+	fracMask = 1<<shift - 1
+)
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+//
+// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
+// Go version prior to Go 1.10.
+func Round(x float64) float64 {
+	// Round is a faster implementation of:
+	//
+	// func Round(x float64) float64 {
+	//   t := Trunc(x)
+	//   if Abs(x-t) >= 0.5 {
+	//     return t + Copysign(1, x)
+	//   }
+	//   return t
+	// }
+	bits := math.Float64bits(x)
+	e := uint(bits>>shift) & mask
+	if e < bias {
+		// Round abs(x) < 1 including denormals.
+		bits &= signMask // +-0
+		if e == bias-1 {
+			bits |= uvone // +-1
+		}
+	} else if e < bias+shift {
+		// Round any abs(x) >= 1 containing a fractional component [0,1).
+		//
+		// Numbers with larger exponents are returned unchanged since they
+		// must be either an integer, infinity, or NaN.
+		const half = 1 << (shift - 1)
+		e -= bias
+		bits += half >> e
+		bits &^= fracMask >> e
+	}
+	return math.Float64frombits(bits)
+}
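The backported Round matches math.Round's half-away-from-zero behavior on Go 1.10+. A quick check of the cases that matter for the fractional-second handling later in this diff:

package main

import (
	"fmt"
	"math"
)

func main() {
	fmt.Println(math.Round(0.1229999 * 1e3)) // 123
	fmt.Println(math.Round(2.5))             // 3
	fmt.Println(math.Round(-2.5))            // -3 (half away from zero)
	fmt.Println(math.Round(-0.2))            // -0
}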
@@ -0,0 +1,11 @@
+// +build go1.6
+
+package sdkrand
+
+import "math/rand"
+
+// Read provides the stub for math.Rand.Read method support for go version's
+// 1.6 and greater.
+func Read(r *rand.Rand, p []byte) (int, error) {
+	return r.Read(p)
+}
@@ -0,0 +1,24 @@
+// +build !go1.6
+
+package sdkrand
+
+import "math/rand"
+
+// Read backfills Go 1.6's math.Rand.Reader for Go 1.5
+func Read(r *rand.Rand, p []byte) (n int, err error) {
+	// Copy of Go standard libraries math package's read function not added to
+	// standard library until Go 1.6.
+	var pos int8
+	var val int64
+	for n = 0; n < len(p); n++ {
+		if pos == 0 {
+			val = r.Int63()
+			pos = 7
+		}
+		p[n] = byte(val)
+		val >>= 8
+		pos--
+	}
+
+	return n, err
+}
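On Go 1.6 and later the shim simply defers to math/rand's Rand.Read. A short usage sketch:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	r := rand.New(rand.NewSource(1)) // deterministic source for the example
	buf := make([]byte, 8)
	n, err := r.Read(buf)
	fmt.Println(n, err, buf) // 8 <nil> and eight pseudo-random bytes
}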
@@ -146,6 +146,9 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) {
 }
 
 func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+	if len(headers) == 0 {
+		return nil
+	}
 	switch r.Interface().(type) {
 	case map[string]*string: // we only support string map value types
 		out := map[string]*string{}
@@ -155,20 +158,29 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err
 				out[k[len(prefix):]] = &v[0]
 			}
 		}
+		if len(out) != 0 {
 			r.Set(reflect.ValueOf(out))
 		}
+
+	}
 	return nil
 }
 
 func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
-	isJSONValue := tag.Get("type") == "jsonvalue"
-	if isJSONValue {
+	switch tag.Get("type") {
+	case "jsonvalue":
 		if len(header) == 0 {
 			return nil
 		}
-	} else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+	case "blob":
+		if len(header) == 0 {
 			return nil
 		}
+	default:
+		if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+			return nil
+		}
+	}
 
 	switch v.Interface().(type) {
 	case *string:
@@ -178,7 +190,7 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro
 		if err != nil {
 			return err
 		}
-		v.Set(reflect.ValueOf(&b))
+		v.Set(reflect.ValueOf(b))
 	case *bool:
 		b, err := strconv.ParseBool(header)
 		if err != nil {
@@ -39,7 +39,7 @@ func Build(r *request.Request) {
 		r.Error = awserr.NewRequestFailure(
 			awserr.New(request.ErrCodeSerialization,
 				"failed to encode rest XML request", err),
-			r.HTTPResponse.StatusCode,
+			0,
 			r.RequestID,
 		)
 		return
@@ -1,8 +1,11 @@
 package protocol
 
 import (
+	"math"
 	"strconv"
 	"time"
+
+	"github.com/aws/aws-sdk-go/internal/sdkmath"
 )
 
 // Names of time formats supported by the SDK
@@ -13,12 +16,19 @@ const (
 )
 
 // Time formats supported by the SDK
+// Output time is intended to not contain decimals
 const (
 	// RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT
 	RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
 
+	// This format is used for output time without seconds precision
+	RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+
 	// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
-	ISO8601TimeFormat = "2006-01-02T15:04:05Z"
+	ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
+
+	// This format is used for output time without seconds precision
+	ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z"
 )
 
 // IsKnownTimestampFormat returns if the timestamp format name
@@ -42,9 +52,9 @@ func FormatTime(name string, t time.Time) string {
 
 	switch name {
 	case RFC822TimeFormatName:
-		return t.Format(RFC822TimeFormat)
+		return t.Format(RFC822OutputTimeFormat)
 	case ISO8601TimeFormatName:
-		return t.Format(ISO8601TimeFormat)
+		return t.Format(ISO8601OutputTimeFormat)
 	case UnixTimeFormatName:
 		return strconv.FormatInt(t.Unix(), 10)
 	default:
@@ -62,10 +72,12 @@ func ParseTime(formatName, value string) (time.Time, error) {
 		return time.Parse(ISO8601TimeFormat, value)
 	case UnixTimeFormatName:
 		v, err := strconv.ParseFloat(value, 64)
+		_, dec := math.Modf(v)
+		dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123
 		if err != nil {
 			return time.Time{}, err
 		}
-		return time.Unix(int64(v), 0), nil
+		return time.Unix(int64(v), int64(dec*(1e9))), nil
 	default:
 		panic("unknown timestamp format name, " + formatName)
 	}
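The added math.Modf/Round steps preserve roughly millisecond precision when a unix timestamp arrives with a fractional part instead of discarding it. A self-contained sketch of the same arithmetic using only the standard library:

package main

import (
	"fmt"
	"math"
	"strconv"
	"time"
)

func main() {
	v, err := strconv.ParseFloat("1558535700.123", 64)
	if err != nil {
		panic(err)
	}
	sec, frac := math.Modf(v)
	frac = math.Round(frac*1e3) / 1e3 // rounds 0.1229999... to 0.123
	t := time.Unix(int64(sec), int64(frac*1e9))
	fmt.Println(t.UTC().Unix(), t.Nanosecond()) // 1558535700 and roughly 123 million nanoseconds
}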
32	vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go generated vendored Normal file
@@ -0,0 +1,32 @@
+package xmlutil
+
+import (
+	"encoding/xml"
+	"strings"
+)
+
+type xmlAttrSlice []xml.Attr
+
+func (x xmlAttrSlice) Len() int {
+	return len(x)
+}
+
+func (x xmlAttrSlice) Less(i, j int) bool {
+	spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space
+	localI, localJ := x[i].Name.Local, x[j].Name.Local
+	valueI, valueJ := x[i].Value, x[j].Value
+
+	spaceCmp := strings.Compare(spaceI, spaceJ)
+	localCmp := strings.Compare(localI, localJ)
+	valueCmp := strings.Compare(valueI, valueJ)
+
+	if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) {
+		return true
+	}
+
+	return false
+}
+
+func (x xmlAttrSlice) Swap(i, j int) {
+	x[i], x[j] = x[j], x[i]
+}
13	vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go generated vendored
@@ -119,7 +119,18 @@ func (n *XMLNode) findElem(name string) (string, bool) {
 
 // StructToXML writes an XMLNode to a xml.Encoder as tokens.
 func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
-	e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
+	// Sort Attributes
+	attrs := node.Attr
+	if sorted {
+		sortedAttrs := make([]xml.Attr, len(attrs))
+		for _, k := range node.Attr {
+			sortedAttrs = append(sortedAttrs, k)
+		}
+		sort.Sort(xmlAttrSlice(sortedAttrs))
+		attrs = sortedAttrs
+	}
+
+	e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs})
 
 	if node.Text != "" {
 		e.EncodeToken(xml.CharData([]byte(node.Text)))
@@ -5,7 +5,6 @@ import (
 	"hash/crc32"
 	"io"
 	"io/ioutil"
-	"math"
 	"strconv"
 	"time"
 
@@ -15,15 +14,6 @@ import (
 	"github.com/aws/aws-sdk-go/aws/request"
 )
 
-type retryer struct {
-	client.DefaultRetryer
-}
-
-func (d retryer) RetryRules(r *request.Request) time.Duration {
-	delay := time.Duration(math.Pow(2, float64(r.RetryCount))) * 50
-	return delay * time.Millisecond
-}
-
 func init() {
 	initClient = func(c *client.Client) {
 		if c.Config.Retryer == nil {
@@ -43,10 +33,9 @@ func setCustomRetryer(c *client.Client) {
 		maxRetries = 10
 	}
 
-	c.Retryer = retryer{
-		DefaultRetryer: client.DefaultRetryer{
-			NumMaxRetries: maxRetries,
-		},
+	c.Retryer = client.DefaultRetryer{
+		NumMaxRetries: maxRetries,
+		MinRetryDelay: 50 * time.Millisecond,
 	}
 }
 
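With the custom retryer type gone, the same behavior is expressed directly through client.DefaultRetryer's fields. A hedged sketch of configuring a client this way from application code; the region and values are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession())

	// Same shape as the setCustomRetryer change above: cap the attempts and
	// start the exponential backoff at 50ms.
	cfg := aws.NewConfig().WithRegion("us-east-1")
	cfg.Retryer = client.DefaultRetryer{
		NumMaxRetries: 10,
		MinRetryDelay: 50 * time.Millisecond,
	}

	svc := dynamodb.New(sess, cfg)
	fmt.Println(svc.ClientInfo.ServiceName)
}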
@@ -16287,7 +16287,7 @@ func (s ChangePasswordOutput) GoString() string {
 // evaluating the Condition elements of the input policies.
 //
 // This data type is used as an input parameter to SimulateCustomPolicy and
-// SimulateCustomPolicy .
+// SimulatePrincipalPolicy .
 type ContextEntry struct {
 	_ struct{} `type:"structure"`
 
@@ -17183,8 +17183,7 @@ type CreateRoleInput struct {
 	//    * The special characters tab (\u0009), line feed (\u000A), and carriage
 	//    return (\u000D)
 	//
-	// Upon success, the response includes the same trust policy as a URL-encoded
-	// JSON string.
+	// Upon success, the response includes the same trust policy in JSON format.
 	//
 	// AssumeRolePolicyDocument is a required field
 	AssumeRolePolicyDocument *string `min:"1" type:"string" required:"true"`
@ -7043,7 +7043,7 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI
|
||||||
}
|
}
|
||||||
|
|
||||||
type AbortMultipartUploadInput struct {
|
type AbortMultipartUploadInput struct {
|
||||||
_ struct{} `type:"structure"`
|
_ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"`
|
||||||
|
|
||||||
// Name of the bucket to which the multipart upload was initiated.
|
// Name of the bucket to which the multipart upload was initiated.
|
||||||
//
|
//
|
||||||
|
@ -8084,7 +8084,7 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix {
|
||||||
}
|
}
|
||||||
|
|
||||||
type CompleteMultipartUploadInput struct {
|
type CompleteMultipartUploadInput struct {
|
||||||
_ struct{} `type:"structure" payload:"MultipartUpload"`
|
_ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"`
|
||||||
|
|
||||||
// Bucket is a required field
|
// Bucket is a required field
|
||||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||||
|
@ -8404,7 +8404,7 @@ func (s *ContinuationEvent) UnmarshalEvent(
|
||||||
}
|
}
|
||||||
|
|
||||||
type CopyObjectInput struct {
|
type CopyObjectInput struct {
|
||||||
_ struct{} `type:"structure"`
|
_ struct{} `locationName:"CopyObjectRequest" type:"structure"`
|
||||||
|
|
||||||
// The canned ACL to apply to the object.
|
// The canned ACL to apply to the object.
|
||||||
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
|
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
|
||||||
|
@ -9025,7 +9025,7 @@ func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucke
|
||||||
}
|
}
|
||||||
|
|
||||||
type CreateBucketInput struct {
|
type CreateBucketInput struct {
|
||||||
_ struct{} `type:"structure" payload:"CreateBucketConfiguration"`
|
_ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"`
|
||||||
|
|
||||||
// The canned ACL to apply to the bucket.
|
// The canned ACL to apply to the bucket.
|
||||||
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
|
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
|
||||||
|
@ -9166,7 +9166,7 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
|
||||||
}
|
}
|
||||||
|
|
||||||
type CreateMultipartUploadInput struct {
|
type CreateMultipartUploadInput struct {
|
||||||
_ struct{} `type:"structure"`
|
_ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"`
|
||||||
|
|
||||||
// The canned ACL to apply to the object.
|
// The canned ACL to apply to the object.
|
||||||
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
|
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
|
||||||
|
@ -9708,7 +9708,7 @@ func (s *Delete) SetQuiet(v bool) *Delete {
|
||||||
}
|
}
|
||||||
|
|
||||||
type DeleteBucketAnalyticsConfigurationInput struct {
|
type DeleteBucketAnalyticsConfigurationInput struct {
|
||||||
_ struct{} `type:"structure"`
|
_ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"`
|
||||||
|
|
||||||
// The name of the bucket from which an analytics configuration is deleted.
|
// The name of the bucket from which an analytics configuration is deleted.
|
||||||
//
|
//
|
||||||
|
@ -9784,7 +9784,7 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
type DeleteBucketCorsInput struct {
|
type DeleteBucketCorsInput struct {
|
||||||
_ struct{} `type:"structure"`
|
_ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"`
|
||||||
|
|
||||||
// Bucket is a required field
|
// Bucket is a required field
|
||||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||||
|
@ -9844,7 +9844,7 @@ func (s DeleteBucketCorsOutput) GoString() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
type DeleteBucketEncryptionInput struct {
|
type DeleteBucketEncryptionInput struct {
|
||||||
_ struct{} `type:"structure"`
|
_ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"`
|
||||||
|
|
||||||
// The name of the bucket containing the server-side encryption configuration
|
// The name of the bucket containing the server-side encryption configuration
|
||||||
// to delete.
|
// to delete.
|
||||||
|
@ -9907,7 +9907,7 @@ func (s DeleteBucketEncryptionOutput) GoString() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
type DeleteBucketInput struct {
|
type DeleteBucketInput struct {
|
||||||
_ struct{} `type:"structure"`
|
_ struct{} `locationName:"DeleteBucketRequest" type:"structure"`
|
||||||
|
|
||||||
// Bucket is a required field
|
// Bucket is a required field
|
||||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||||
|
@ -9953,7 +9953,7 @@ func (s *DeleteBucketInput) getBucket() (v string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
type DeleteBucketInventoryConfigurationInput struct {
|
type DeleteBucketInventoryConfigurationInput struct {
|
||||||
_ struct{} `type:"structure"`
|
_ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"`
|
||||||
|
|
||||||
// The name of the bucket containing the inventory configuration to delete.
|
// The name of the bucket containing the inventory configuration to delete.
|
||||||
//
|
//
|
||||||
|
@ -10029,7 +10029,7 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
type DeleteBucketLifecycleInput struct {
|
type DeleteBucketLifecycleInput struct {
|
||||||
_ struct{} `type:"structure"`
|
_ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"`
|
||||||
|
|
||||||
// Bucket is a required field
|
// Bucket is a required field
|
||||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||||
|
@ -10089,7 +10089,7 @@ func (s DeleteBucketLifecycleOutput) GoString() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
type DeleteBucketMetricsConfigurationInput struct {
|
type DeleteBucketMetricsConfigurationInput struct {
|
||||||
_ struct{} `type:"structure"`
|
_ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"`
|
||||||
|
|
||||||
// The name of the bucket containing the metrics configuration to delete.
|
// The name of the bucket containing the metrics configuration to delete.
|
||||||
//
|
//
|
||||||
|
@ -10179,7 +10179,7 @@ func (s DeleteBucketOutput) GoString() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
 type DeleteBucketPolicyInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

@@ -10239,7 +10239,7 @@ func (s DeleteBucketPolicyOutput) GoString() string {
 }

 type DeleteBucketReplicationInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"`

 	// The bucket name.
 	//
@@ -10304,7 +10304,7 @@ func (s DeleteBucketReplicationOutput) GoString() string {
 }

 type DeleteBucketTaggingInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -10364,7 +10364,7 @@ func (s DeleteBucketTaggingOutput) GoString() string {
 }

 type DeleteBucketWebsiteInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -10510,7 +10510,7 @@ func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
 }

 type DeleteObjectInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"DeleteObjectRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -10656,7 +10656,7 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput {
 }

 type DeleteObjectTaggingInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -10749,7 +10749,7 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO
 }

 type DeleteObjectsInput struct {
-	_ struct{} `type:"structure" payload:"Delete"`
+	_ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -10885,7 +10885,7 @@ func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput {
 }

 type DeletePublicAccessBlockInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"`

 	// The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete.
 	//
@@ -11341,7 +11341,7 @@ func (s *FilterRule) SetValue(v string) *FilterRule {
 }

 type GetBucketAccelerateConfigurationInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"`

 	// Name of the bucket for which the accelerate configuration is retrieved.
 	//
@@ -11412,7 +11412,7 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA
 }

 type GetBucketAclInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketAclRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -11489,7 +11489,7 @@ func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput {
 }

 type GetBucketAnalyticsConfigurationInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"`

 	// The name of the bucket from which an analytics configuration is retrieved.
 	//
@@ -11574,7 +11574,7 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana
 }

 type GetBucketCorsInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketCorsRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -11642,7 +11642,7 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput {
 }

 type GetBucketEncryptionInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"`

 	// The name of the bucket from which the server-side encryption configuration
 	// is retrieved.
@@ -11714,7 +11714,7 @@ func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *Serv
 }

 type GetBucketInventoryConfigurationInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"`

 	// The name of the bucket containing the inventory configuration to retrieve.
 	//
@@ -11799,7 +11799,7 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv
 }

 type GetBucketLifecycleConfigurationInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -11867,7 +11867,7 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge
 }

 type GetBucketLifecycleInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -11935,7 +11935,7 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput
 }

 type GetBucketLocationInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketLocationRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -12003,7 +12003,7 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca
 }

 type GetBucketLoggingInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -12075,7 +12075,7 @@ func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucket
 }

 type GetBucketMetricsConfigurationInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"`

 	// The name of the bucket containing the metrics configuration to retrieve.
 	//
@@ -12160,7 +12160,7 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics
 }

 type GetBucketNotificationConfigurationRequest struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"`

 	// Name of the bucket to get the notification configuration for.
 	//
@@ -12208,7 +12208,7 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) {
 }

 type GetBucketPolicyInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -12277,7 +12277,7 @@ func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput {
 }

 type GetBucketPolicyStatusInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"`

 	// The name of the Amazon S3 bucket whose policy status you want to retrieve.
 	//
@@ -12348,7 +12348,7 @@ func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucke
 }

 type GetBucketReplicationInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -12418,7 +12418,7 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC
 }

 type GetBucketRequestPaymentInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -12487,7 +12487,7 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym
 }

 type GetBucketTaggingInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -12556,7 +12556,7 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput {
 }

 type GetBucketVersioningInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -12636,7 +12636,7 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp
 }

 type GetBucketWebsiteInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -12730,7 +12730,7 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb
 }

 type GetObjectAclInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetObjectAclRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -12853,7 +12853,7 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput {
 }

 type GetObjectInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetObjectRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -13090,7 +13090,7 @@ func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput {
 }

 type GetObjectLegalHoldInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"`

 	// The bucket containing the object whose Legal Hold status you want to retrieve.
 	//
@@ -13199,7 +13199,7 @@ func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObje
 }

 type GetObjectLockConfigurationInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"`

 	// The bucket whose object lock configuration you want to retrieve.
 	//
@@ -13581,7 +13581,7 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput
 }

 type GetObjectRetentionInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"`

 	// The bucket containing the object whose retention settings you want to retrieve.
 	//
@@ -13690,7 +13690,7 @@ func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObje
 }

 type GetObjectTaggingInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -13790,7 +13790,7 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput
 }

 type GetObjectTorrentInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -13895,7 +13895,7 @@ func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOu
 }

 type GetPublicAccessBlockInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"`

 	// The name of the Amazon S3 bucket whose PublicAccessBlock configuration you
 	// want to retrieve.
@@ -14126,7 +14126,7 @@ func (s *Grantee) SetURI(v string) *Grantee {
 }

 type HeadBucketInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"HeadBucketRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -14186,7 +14186,7 @@ func (s HeadBucketOutput) GoString() string {
 }

 type HeadObjectInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"HeadObjectRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -15661,7 +15661,7 @@ func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter {
 }

 type ListBucketAnalyticsConfigurationsInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"`

 	// The name of the bucket from which analytics configurations are retrieved.
 	//
@@ -15773,7 +15773,7 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str
 }

 type ListBucketInventoryConfigurationsInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"`

 	// The name of the bucket containing the inventory configurations to retrieve.
 	//
@@ -15887,7 +15887,7 @@ func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v str
 }

 type ListBucketMetricsConfigurationsInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"`

 	// The name of the bucket containing the metrics configurations to retrieve.
 	//
@@ -16047,7 +16047,7 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput {
 }

 type ListMultipartUploadsInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -16291,7 +16291,7 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti
 }

 type ListObjectVersionsInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -16524,7 +16524,7 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe
 }

 type ListObjectsInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"ListObjectsRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -16736,7 +16736,7 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
 }

 type ListObjectsV2Input struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"ListObjectsV2Request" type:"structure"`

 	// Name of the bucket to list.
 	//
@@ -16997,7 +16997,7 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output {
 }

 type ListPartsInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"ListPartsRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -18622,7 +18622,7 @@ func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *Publi
 }

 type PutBucketAccelerateConfigurationInput struct {
-	_ struct{} `type:"structure" payload:"AccelerateConfiguration"`
+	_ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"`

 	// Specifies the Accelerate Configuration you want to set for the bucket.
 	//
@@ -18698,7 +18698,7 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string {
 }

 type PutBucketAclInput struct {
-	_ struct{} `type:"structure" payload:"AccessControlPolicy"`
+	_ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"`

 	// The canned ACL to apply to the bucket.
 	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
@@ -18827,7 +18827,7 @@ func (s PutBucketAclOutput) GoString() string {
 }

 type PutBucketAnalyticsConfigurationInput struct {
-	_ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
+	_ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"`

 	// The configuration and any analyses for the analytics filter.
 	//
@@ -18922,7 +18922,7 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
 }

 type PutBucketCorsInput struct {
-	_ struct{} `type:"structure" payload:"CORSConfiguration"`
+	_ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19004,7 +19004,7 @@ func (s PutBucketCorsOutput) GoString() string {
 }

 type PutBucketEncryptionInput struct {
-	_ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"`
+	_ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"`

 	// Specifies default encryption for a bucket using server-side encryption with
 	// Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information
@@ -19089,7 +19089,7 @@ func (s PutBucketEncryptionOutput) GoString() string {
 }

 type PutBucketInventoryConfigurationInput struct {
-	_ struct{} `type:"structure" payload:"InventoryConfiguration"`
+	_ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"`

 	// The name of the bucket where the inventory configuration will be stored.
 	//
@@ -19184,7 +19184,7 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string {
 }

 type PutBucketLifecycleConfigurationInput struct {
-	_ struct{} `type:"structure" payload:"LifecycleConfiguration"`
+	_ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19260,7 +19260,7 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string {
 }

 type PutBucketLifecycleInput struct {
-	_ struct{} `type:"structure" payload:"LifecycleConfiguration"`
+	_ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19333,7 +19333,7 @@ func (s PutBucketLifecycleOutput) GoString() string {
 }

 type PutBucketLoggingInput struct {
-	_ struct{} `type:"structure" payload:"BucketLoggingStatus"`
+	_ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19410,7 +19410,7 @@ func (s PutBucketLoggingOutput) GoString() string {
 }

 type PutBucketMetricsConfigurationInput struct {
-	_ struct{} `type:"structure" payload:"MetricsConfiguration"`
+	_ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"`

 	// The name of the bucket for which the metrics configuration is set.
 	//
@@ -19505,7 +19505,7 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string {
 }

 type PutBucketNotificationConfigurationInput struct {
-	_ struct{} `type:"structure" payload:"NotificationConfiguration"`
+	_ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19585,7 +19585,7 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string {
 }

 type PutBucketNotificationInput struct {
-	_ struct{} `type:"structure" payload:"NotificationConfiguration"`
+	_ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19657,7 +19657,7 @@ func (s PutBucketNotificationOutput) GoString() string {
 }

 type PutBucketPolicyInput struct {
-	_ struct{} `type:"structure" payload:"Policy"`
+	_ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19741,7 +19741,7 @@ func (s PutBucketPolicyOutput) GoString() string {
 }

 type PutBucketReplicationInput struct {
-	_ struct{} `type:"structure" payload:"ReplicationConfiguration"`
+	_ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19830,7 +19830,7 @@ func (s PutBucketReplicationOutput) GoString() string {
 }

 type PutBucketRequestPaymentInput struct {
-	_ struct{} `type:"structure" payload:"RequestPaymentConfiguration"`
+	_ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19907,7 +19907,7 @@ func (s PutBucketRequestPaymentOutput) GoString() string {
 }

 type PutBucketTaggingInput struct {
-	_ struct{} `type:"structure" payload:"Tagging"`
+	_ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -19984,7 +19984,7 @@ func (s PutBucketTaggingOutput) GoString() string {
 }

 type PutBucketVersioningInput struct {
-	_ struct{} `type:"structure" payload:"VersioningConfiguration"`
+	_ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -20070,7 +20070,7 @@ func (s PutBucketVersioningOutput) GoString() string {
 }

 type PutBucketWebsiteInput struct {
-	_ struct{} `type:"structure" payload:"WebsiteConfiguration"`
+	_ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -20149,7 +20149,7 @@ func (s PutBucketWebsiteOutput) GoString() string {
 }

 type PutObjectAclInput struct {
-	_ struct{} `type:"structure" payload:"AccessControlPolicy"`
+	_ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"`

 	// The canned ACL to apply to the object.
 	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
@@ -20324,7 +20324,7 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
 }

 type PutObjectInput struct {
-	_ struct{} `type:"structure" payload:"Body"`
+	_ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`

 	// The canned ACL to apply to the object.
 	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
@@ -20671,7 +20671,7 @@ func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput {
 }

 type PutObjectLegalHoldInput struct {
-	_ struct{} `type:"structure" payload:"LegalHold"`
+	_ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"`

 	// The bucket containing the object that you want to place a Legal Hold on.
 	//
@@ -20791,7 +20791,7 @@ func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHo
 }

 type PutObjectLockConfigurationInput struct {
-	_ struct{} `type:"structure" payload:"ObjectLockConfiguration"`
+	_ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"`

 	// The bucket whose object lock configuration you want to create or replace.
 	//
@@ -20998,7 +20998,7 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
 }

 type PutObjectRetentionInput struct {
-	_ struct{} `type:"structure" payload:"Retention"`
+	_ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"`

 	// The bucket that contains the object you want to apply this Object Retention
 	// configuration to.
@@ -21129,7 +21129,7 @@ func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetenti
 }

 type PutObjectTaggingInput struct {
-	_ struct{} `type:"structure" payload:"Tagging"`
+	_ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -21237,7 +21237,7 @@ func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput
 }

 type PutPublicAccessBlockInput struct {
-	_ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"`
+	_ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"`

 	// The name of the Amazon S3 bucket whose PublicAccessBlock configuration you
 	// want to set.
@@ -21999,7 +21999,7 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress {
 }

 type RestoreObjectInput struct {
-	_ struct{} `type:"structure" payload:"RestoreRequest"`
+	_ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -23715,7 +23715,7 @@ func (s *Transition) SetStorageClass(v string) *Transition {
 }

 type UploadPartCopyInput struct {
-	_ struct{} `type:"structure"`
+	_ struct{} `locationName:"UploadPartCopyRequest" type:"structure"`

 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -24045,7 +24045,7 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopy
 }

 type UploadPartInput struct {
-	_ struct{} `type:"structure" payload:"Body"`
+	_ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"`

 	// Object data.
 	Body io.ReadSeeker `type:"blob"`
@@ -63,6 +63,20 @@
 // See the s3manager package's Downloader type documentation for more information.
 // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
 //
+// Automatic URI cleaning
+//
+// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname)
+// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct
+// used by the service client.
+//
+//	svc := s3.New(sess, &aws.Config{
+//		DisableRestProtocolURICleaning: aws.Bool(true),
+//	})
+//	out, err := svc.GetObject(&s3.GetObjectInput {
+//		Bucket: aws.String("bucketname"),
+//		Key: aws.String("//foo//bar//moo"),
+//	})
+//
 // Get Bucket Region
 //
 // GetBucketRegion will attempt to get the region for a bucket using a region
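For reference, the example documented above can be exercised as a small standalone program. The sketch below follows the doc comment; the bucket name and key are placeholders, not values from this change.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())

	// Disable REST protocol URI cleaning so adjacent slashes in the key are preserved.
	svc := s3.New(sess, &aws.Config{
		DisableRestProtocolURICleaning: aws.Bool(true),
	})

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("bucketname"),      // placeholder bucket
		Key:    aws.String("//foo//bar//moo"), // key with adjacent slashes
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()
	fmt.Println(aws.Int64Value(out.ContentLength))
}
```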
@@ -0,0 +1,11 @@
+package sts
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func init() {
+	initRequest = customizeRequest
+}
+
+func customizeRequest(r *request.Request) {
+	r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException)
+}
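The new file above appends ErrCodeIDPCommunicationErrorException to the retryable error codes of every STS request; in practice this matters for AssumeRoleWithWebIdentity, which can fail transiently when the identity provider is unreachable. A minimal sketch of the call path this affects, with placeholder role ARN, session name, and token (none of these values come from this commit):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithMaxRetries(3)))
	svc := sts.New(sess)

	// With the request customization in place, an IDPCommunicationError response
	// is treated as retryable and retried up to MaxRetries before failing.
	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName:  aws.String("example-session"),                        // placeholder
		WebIdentityToken: aws.String("eyJhbGciOi..."),                          // placeholder token
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("assumed role: %s", aws.StringValue(out.AssumedRoleUser.Arn))
}
```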
@@ -1,21 +1,20 @@
 dist: xenial
 language: go
 go:
-- "1.11.x"
-env:
-  GOFLAGS=-mod=vendor
+- "1.13.x"
+matrix:
+  fast_finish: true
+  allow_failures:
+  - go: tip
 
 install:
 - make tools
 
 script:
 - make lint
-- make test
+- go test -timeout=30s -parallel=4 -v ./...
 
 branches:
   only:
   - master
-matrix:
-  fast_finish: true
-  allow_failures:
-  - go: tip
@@ -1,3 +1,9 @@
+# v0.4.0 (October 3, 2019)
+
+BUG FIXES
+
+* awsauth: fixed credentials retrieval, validation, and error handling
+
 # v0.3.0 (February 26, 2019)
 
 BUG FIXES
@@ -1,5 +1,9 @@
 default: test lint
 
+fmt:
+	@echo "==> Fixing source code with gofmt..."
+	gofmt -s -w ./
+
 lint:
 	@echo "==> Checking source code against linters..."
 	@golangci-lint run ./...
@@ -6,7 +6,7 @@ An opinionated [AWS Go SDK](https://github.com/aws/aws-sdk-go) library for consi
 
 ## Requirements
 
-- [Go](https://golang.org/doc/install) 1.11.4+
+- [Go](https://golang.org/doc/install) 1.12
 
 ## Development
 
@@ -22,6 +22,21 @@ import (
 	"github.com/hashicorp/go-multierror"
 )
 
+const (
+	// errMsgNoValidCredentialSources error getting credentials
+	errMsgNoValidCredentialSources = `No valid credential sources found for AWS Provider.
+Please see https://terraform.io/docs/providers/aws/index.html for more information on
+providing credentials for the AWS Provider`
+)
+
+var (
+	// ErrNoValidCredentialSources indicates that no credentials source could be found
+	ErrNoValidCredentialSources = errNoValidCredentialSources()
+)
+
+func errNoValidCredentialSources() error { return errors.New(errMsgNoValidCredentialSources) }
+
+// GetAccountIDAndPartition gets the account ID and associated partition.
 func GetAccountIDAndPartition(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (string, string, error) {
 	var accountID, partition string
 	var err, errors error
@@ -51,6 +66,8 @@ func GetAccountIDAndPartition(iamconn *iam.IAM, stsconn *sts.STS, authProviderNa
 	return accountID, partition, errors
 }
 
+// GetAccountIDAndPartitionFromEC2Metadata gets the account ID and associated
+// partition from EC2 metadata.
 func GetAccountIDAndPartitionFromEC2Metadata() (string, string, error) {
 	log.Println("[DEBUG] Trying to get account information via EC2 Metadata")
 
@@ -75,6 +92,8 @@ func GetAccountIDAndPartitionFromEC2Metadata() (string, string, error) {
 	return parseAccountIDAndPartitionFromARN(info.InstanceProfileArn)
 }
 
+// GetAccountIDAndPartitionFromIAMGetUser gets the account ID and associated
+// partition from IAM.
 func GetAccountIDAndPartitionFromIAMGetUser(iamconn *iam.IAM) (string, string, error) {
 	log.Println("[DEBUG] Trying to get account information via iam:GetUser")
 
@@ -102,6 +121,8 @@ func GetAccountIDAndPartitionFromIAMGetUser(iamconn *iam.IAM) (string, string, e
 	return parseAccountIDAndPartitionFromARN(aws.StringValue(output.User.Arn))
 }
 
+// GetAccountIDAndPartitionFromIAMListRoles gets the account ID and associated
+// partition from listing IAM roles.
 func GetAccountIDAndPartitionFromIAMListRoles(iamconn *iam.IAM) (string, string, error) {
 	log.Println("[DEBUG] Trying to get account information via iam:ListRoles")
 
@@ -123,6 +144,8 @@ func GetAccountIDAndPartitionFromIAMListRoles(iamconn *iam.IAM) (string, string,
 	return parseAccountIDAndPartitionFromARN(aws.StringValue(output.Roles[0].Arn))
 }
 
+// GetAccountIDAndPartitionFromSTSGetCallerIdentity gets the account ID and associated
+// partition from STS caller identity.
 func GetAccountIDAndPartitionFromSTSGetCallerIdentity(stsconn *sts.STS) (string, string, error) {
 	log.Println("[DEBUG] Trying to get account information via sts:GetCallerIdentity")
 
@@ -148,9 +171,54 @@ func parseAccountIDAndPartitionFromARN(inputARN string) (string, string, error)
 	return arn.AccountID, arn.Partition, nil
 }
 
-// This function is responsible for reading credentials from the
-// environment in the case that they're not explicitly specified
-// in the Terraform configuration.
+// GetCredentialsFromSession returns credentials derived from a session. A
+// session uses the AWS SDK Go chain of providers so may use a provider (e.g.,
+// ProcessProvider) that is not part of the Terraform provider chain.
+func GetCredentialsFromSession(c *Config) (*awsCredentials.Credentials, error) {
+	log.Printf("[INFO] Attempting to use session-derived credentials")
+
+	var sess *session.Session
+	var err error
+	if c.Profile == "" {
+		sess, err = session.NewSession()
+		if err != nil {
+			return nil, ErrNoValidCredentialSources
+		}
+	} else {
+		options := &session.Options{
+			Config: aws.Config{
+				HTTPClient: cleanhttp.DefaultClient(),
+				MaxRetries: aws.Int(0),
+				Region:     aws.String(c.Region),
+			},
+		}
+		options.Profile = c.Profile
+		options.SharedConfigState = session.SharedConfigEnable
+
+		sess, err = session.NewSessionWithOptions(*options)
+		if err != nil {
+			if IsAWSErr(err, "NoCredentialProviders", "") {
+				return nil, ErrNoValidCredentialSources
+			}
+			return nil, fmt.Errorf("Error creating AWS session: %s", err)
+		}
+	}
+
+	creds := sess.Config.Credentials
+	cp, err := sess.Config.Credentials.Get()
+	if err != nil {
+		return nil, ErrNoValidCredentialSources
+	}
+
+	log.Printf("[INFO] Successfully derived credentials from session")
+	log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
+	return creds, nil
+}
+
+// GetCredentials gets credentials from the environment, shared credentials,
+// or the session (which may include a credential process). GetCredentials also
+// validates the credentials and the ability to assume a role or will return an
+// error if unsuccessful.
 func GetCredentials(c *Config) (*awsCredentials.Credentials, error) {
 	// build a chain provider, lazy-evaluated by aws-sdk
 	providers := []awsCredentials.Provider{
@@ -225,29 +293,31 @@ func GetCredentials(c *Config) (*awsCredentials.Credentials, error) {
 		}
 	}
 
-	// This is the "normal" flow (i.e. not assuming a role)
-	if c.AssumeRoleARN == "" {
-		return awsCredentials.NewChainCredentials(providers), nil
-	}
-
-	// Otherwise we need to construct and STS client with the main credentials, and verify
-	// that we can assume the defined role.
-	log.Printf("[INFO] Attempting to AssumeRole %s (SessionName: %q, ExternalId: %q, Policy: %q)",
-		c.AssumeRoleARN, c.AssumeRoleSessionName, c.AssumeRoleExternalID, c.AssumeRolePolicy)
-
+	// Validate the credentials before returning them
 	creds := awsCredentials.NewChainCredentials(providers)
 	cp, err := creds.Get()
 	if err != nil {
-		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
-			return nil, errors.New(`No valid credential sources found for AWS Provider.
-Please see https://terraform.io/docs/providers/aws/index.html for more information on
-providing credentials for the AWS Provider`)
+		if IsAWSErr(err, "NoCredentialProviders", "") {
+			creds, err = GetCredentialsFromSession(c)
+			if err != nil {
+				return nil, err
+			}
 		}
+		} else {
 			return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
 		}
+	} else {
 		log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
+	}
+
+	// This is the "normal" flow (i.e. not assuming a role)
+	if c.AssumeRoleARN == "" {
+		return creds, nil
+	}
+
+	// Otherwise we need to construct an STS client with the main credentials, and verify
+	// that we can assume the defined role.
+	log.Printf("[INFO] Attempting to AssumeRole %s (SessionName: %q, ExternalId: %q, Policy: %q)",
+		c.AssumeRoleARN, c.AssumeRoleSessionName, c.AssumeRoleExternalID, c.AssumeRolePolicy)
+
 	awsConfig := &aws.Config{
 		Credentials: creds,
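Taken together, the awsauth.go changes mean GetCredentials no longer fails outright when the static provider chain comes up empty: it falls back to GetCredentialsFromSession, so SDK-only sources such as a shared-config credential process can still satisfy the provider. A rough usage sketch, assuming the package is imported under the alias awsbase and using only the Config fields referenced in the diff above:

```go
package main

import (
	"log"

	awsbase "github.com/hashicorp/aws-sdk-go-base"
)

func main() {
	// Region and Profile are fields used by the code above; other Config fields
	// are left at their zero values for this sketch.
	cfg := &awsbase.Config{
		Region:  "us-west-2",
		Profile: "example", // hypothetical profile name
	}

	// If the chain providers find nothing, GetCredentials now tries
	// GetCredentialsFromSession before returning ErrNoValidCredentialSources.
	creds, err := awsbase.GetCredentials(cfg)
	if err != nil {
		log.Fatal(err)
	}

	v, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("provider used: %s", v.ProviderName)
}
```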
@@ -1,10 +1,12 @@
 module github.com/hashicorp/aws-sdk-go-base
 
 require (
-	github.com/aws/aws-sdk-go v1.16.36
+	github.com/aws/aws-sdk-go v1.25.3
 	github.com/hashicorp/go-cleanhttp v0.5.0
 	github.com/hashicorp/go-multierror v1.0.0
 	github.com/stretchr/testify v1.3.0 // indirect
 	golang.org/x/net v0.0.0-20190213061140-3a22650c66bd // indirect
 	golang.org/x/text v0.3.0 // indirect
 )
+
+go 1.13
@@ -1,5 +1,7 @@
 github.com/aws/aws-sdk-go v1.16.36 h1:POeH34ZME++pr7GBGh+ZO6Y5kOwSMQpqp5BGUgooJ6k=
 github.com/aws/aws-sdk-go v1.16.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.3 h1:uM16hIw9BotjZKMZlX05SN2EFtaWfi/NonPKIARiBLQ=
+github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
@ -6,6 +6,7 @@ import (
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
|
"os"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
@ -13,9 +14,8 @@ import (
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
)
|
)
|
||||||
|
|
||||||
// GetMockedAwsApiSession establishes a httptest server to simulate behaviour
|
// MockAwsApiServer establishes a httptest server to simulate behaviour of a real AWS API server
|
||||||
// of a real AWS API server
|
func MockAwsApiServer(svcName string, endpoints []*MockEndpoint) *httptest.Server {
|
||||||
func GetMockedAwsApiSession(svcName string, endpoints []*MockEndpoint) (func(), *session.Session, error) {
|
|
||||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
buf := new(bytes.Buffer)
|
buf := new(bytes.Buffer)
|
||||||
if _, err := buf.ReadFrom(r.Body); err != nil {
|
if _, err := buf.ReadFrom(r.Body); err != nil {
|
||||||
|
@ -46,6 +46,13 @@ func GetMockedAwsApiSession(svcName string, endpoints []*MockEndpoint) (func(),
|
||||||
w.WriteHeader(400)
|
w.WriteHeader(400)
|
||||||
}))
|
}))
|
||||||
|
|
||||||
|
return ts
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMockedAwsApiSession establishes an AWS session to a simulated AWS API server for a given service and route endpoints.
|
||||||
|
func GetMockedAwsApiSession(svcName string, endpoints []*MockEndpoint) (func(), *session.Session, error) {
|
||||||
|
ts := MockAwsApiServer(svcName, endpoints)
|
||||||
|
|
||||||
sc := awsCredentials.NewStaticCredentials("accessKey", "secretKey", "")
|
sc := awsCredentials.NewStaticCredentials("accessKey", "secretKey", "")
|
||||||
|
|
||||||
sess, err := session.NewSession(&aws.Config{
|
sess, err := session.NewSession(&aws.Config{
|
||||||
|
@@ -58,19 +65,166 @@ func GetMockedAwsApiSession(svcName string, endpoints []*MockEndpoint) (func(),
     return ts.Close, sess, err
 }

+// awsMetadataApiMock establishes a httptest server to mock out the internal AWS Metadata
+// service. IAM Credentials are retrieved by the EC2RoleProvider, which makes
+// API calls to this internal URL. By replacing the server with a test server,
+// we can simulate an AWS environment
+func awsMetadataApiMock(responses []*MetadataResponse) func() {
+    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        w.Header().Set("Content-Type", "text/plain")
+        w.Header().Add("Server", "MockEC2")
+        log.Printf("[DEBUG] Mocker server received request to %q", r.RequestURI)
+        for _, e := range responses {
+            if r.RequestURI == e.Uri {
+                fmt.Fprintln(w, e.Body)
+                return
+            }
+        }
+        w.WriteHeader(400)
+    }))
+
+    os.Setenv("AWS_METADATA_URL", ts.URL+"/latest")
+    return ts.Close
+}

+// MockEndpoint represents a basic request and response that can be used for creating simple httptest server routes.
 type MockEndpoint struct {
     Request  *MockRequest
     Response *MockResponse
 }

+// MockRequest represents a basic HTTP request
 type MockRequest struct {
     Method string
     Uri    string
     Body   string
 }

+// MockResponse represents a basic HTTP response.
 type MockResponse struct {
     StatusCode  int
     Body        string
     ContentType string
 }

+// MetadataResponse represents a metadata server response URI and body
+type MetadataResponse struct {
+    Uri  string `json:"uri"`
+    Body string `json:"body"`
+}
+
+var ec2metadata_instanceIdEndpoint = &MetadataResponse{
+    Uri:  "/latest/meta-data/instance-id",
+    Body: "mock-instance-id",
+}
+
+var ec2metadata_securityCredentialsEndpoints = []*MetadataResponse{
+    {
+        Uri:  "/latest/meta-data/iam/security-credentials/",
+        Body: "test_role",
+    },
+    {
+        Uri:  "/latest/meta-data/iam/security-credentials/test_role",
+        Body: "{\"Code\":\"Success\",\"LastUpdated\":\"2015-12-11T17:17:25Z\",\"Type\":\"AWS-HMAC\",\"AccessKeyId\":\"somekey\",\"SecretAccessKey\":\"somesecret\",\"Token\":\"sometoken\"}",
+    },
+}
+
+var ec2metadata_iamInfoEndpoint = &MetadataResponse{
+    Uri:  "/latest/meta-data/iam/info",
+    Body: "{\"Code\": \"Success\",\"LastUpdated\": \"2016-03-17T12:27:32Z\",\"InstanceProfileArn\": \"arn:aws:iam::000000000000:instance-profile/my-instance-profile\",\"InstanceProfileId\": \"AIPAABCDEFGHIJKLMN123\"}",
+}
+
+const ec2metadata_iamInfoEndpoint_expectedAccountID = `000000000000`
+const ec2metadata_iamInfoEndpoint_expectedPartition = `aws`
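The metadata mock added here works by pointing AWS_METADATA_URL at an httptest server, so the EC2 role provider in the SDK resolves credentials from canned responses. A rough sketch of how that would be exercised from a test in the same package follows; whether an empty Config actually falls through to the EC2 role provider, and the exact return type of GetCredentials (assumed here to expose the SDK's Get method), are assumptions on my part.

package awsbase

import "testing"

func TestEC2RoleCredentials_sketch(t *testing.T) {
    // Serve instance identity, IAM info and role credentials from the mock
    // metadata server; awsMetadataApiMock also sets AWS_METADATA_URL for us.
    endpoints := append([]*MetadataResponse{ec2metadata_instanceIdEndpoint, ec2metadata_iamInfoEndpoint},
        ec2metadata_securityCredentialsEndpoints...)
    cleanup := awsMetadataApiMock(endpoints)
    defer cleanup()

    creds, err := GetCredentials(&Config{})
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
    if _, err := creds.Get(); err != nil {
        t.Fatalf("expected credentials from the mocked metadata service: %s", err)
    }
}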
+
+const iamResponse_GetUser_valid = `<GetUserResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
+  <GetUserResult>
+    <User>
+      <UserId>AIDACKCEVSQ6C2EXAMPLE</UserId>
+      <Path>/division_abc/subdivision_xyz/</Path>
+      <UserName>Bob</UserName>
+      <Arn>arn:aws:iam::111111111111:user/division_abc/subdivision_xyz/Bob</Arn>
+      <CreateDate>2013-10-02T17:01:44Z</CreateDate>
+      <PasswordLastUsed>2014-10-10T14:37:51Z</PasswordLastUsed>
+    </User>
+  </GetUserResult>
+  <ResponseMetadata>
+    <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
+  </ResponseMetadata>
+</GetUserResponse>`
+
+const iamResponse_GetUser_valid_expectedAccountID = `111111111111`
+const iamResponse_GetUser_valid_expectedPartition = `aws`
+
+const iamResponse_GetUser_unauthorized = `<ErrorResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
+  <Error>
+    <Type>Sender</Type>
+    <Code>AccessDenied</Code>
+    <Message>User: arn:aws:iam::123456789012:user/Bob is not authorized to perform: iam:GetUser on resource: arn:aws:iam::123456789012:user/Bob</Message>
+  </Error>
+  <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
+</ErrorResponse>`
+
+const stsResponse_GetCallerIdentity_valid = `<GetCallerIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+  <GetCallerIdentityResult>
+    <Arn>arn:aws:iam::222222222222:user/Alice</Arn>
+    <UserId>AKIAI44QH8DHBEXAMPLE</UserId>
+    <Account>222222222222</Account>
+  </GetCallerIdentityResult>
+  <ResponseMetadata>
+    <RequestId>01234567-89ab-cdef-0123-456789abcdef</RequestId>
+  </ResponseMetadata>
+</GetCallerIdentityResponse>`
+
+const stsResponse_GetCallerIdentity_valid_expectedAccountID = `222222222222`
+const stsResponse_GetCallerIdentity_valid_expectedPartition = `aws`
+
+const stsResponse_GetCallerIdentity_unauthorized = `<ErrorResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+  <Error>
+    <Type>Sender</Type>
+    <Code>AccessDenied</Code>
+    <Message>User: arn:aws:iam::123456789012:user/Bob is not authorized to perform: sts:GetCallerIdentity</Message>
+  </Error>
+  <RequestId>01234567-89ab-cdef-0123-456789abcdef</RequestId>
+</ErrorResponse>`
+
+const iamResponse_GetUser_federatedFailure = `<ErrorResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
+  <Error>
+    <Type>Sender</Type>
+    <Code>ValidationError</Code>
+    <Message>Must specify userName when calling with non-User credentials</Message>
+  </Error>
+  <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
+</ErrorResponse>`
+
+const iamResponse_ListRoles_valid = `<ListRolesResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
+  <ListRolesResult>
+    <IsTruncated>true</IsTruncated>
+    <Marker>AWceSSsKsazQ4IEplT9o4hURCzBs00iavlEvEXAMPLE</Marker>
+    <Roles>
+      <member>
+        <Path>/</Path>
+        <AssumeRolePolicyDocument>%7B%22Version%22%3A%222008-10-17%22%2C%22Statement%22%3A%5B%7B%22Sid%22%3A%22%22%2C%22Effect%22%3A%22Allow%22%2C%22Principal%22%3A%7B%22Service%22%3A%22ec2.amazonaws.com%22%7D%2C%22Action%22%3A%22sts%3AAssumeRole%22%7D%5D%7D</AssumeRolePolicyDocument>
+        <RoleId>AROACKCEVSQ6C2EXAMPLE</RoleId>
+        <RoleName>elasticbeanstalk-role</RoleName>
+        <Arn>arn:aws:iam::444444444444:role/elasticbeanstalk-role</Arn>
+        <CreateDate>2013-10-02T17:01:44Z</CreateDate>
+      </member>
+    </Roles>
+  </ListRolesResult>
+  <ResponseMetadata>
+    <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
+  </ResponseMetadata>
+</ListRolesResponse>`
+
+const iamResponse_ListRoles_valid_expectedAccountID = `444444444444`
+const iamResponse_ListRoles_valid_expectedPartition = `aws`
+
+const iamResponse_ListRoles_unauthorized = `<ErrorResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
+  <Error>
+    <Type>Sender</Type>
+    <Code>AccessDenied</Code>
+    <Message>User: arn:aws:iam::123456789012:user/Bob is not authorized to perform: iam:ListRoles on resource: arn:aws:iam::123456789012:role/</Message>
+  </Error>
+  <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
+</ErrorResponse>`
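The canned STS response above pairs naturally with GetAccountIDAndPartitionFromSTSGetCallerIdentity, which the session changes below lean on for credential validation. A sketch of that combination follows, with the same caveats as the earlier test sketch: same package assumed, and the "/" URI and form-encoded request body are assumptions about how the SDK issues GetCallerIdentity.

package awsbase

import (
    "testing"

    "github.com/aws/aws-sdk-go/service/sts"
)

func TestGetCallerIdentityFromMock_sketch(t *testing.T) {
    closeFn, sess, err := GetMockedAwsApiSession("STS", []*MockEndpoint{
        {
            Request:  &MockRequest{Method: "POST", Uri: "/", Body: "Action=GetCallerIdentity&Version=2011-06-15"},
            Response: &MockResponse{StatusCode: 200, Body: stsResponse_GetCallerIdentity_valid, ContentType: "text/xml"},
        },
    })
    if err != nil {
        t.Fatal(err)
    }
    defer closeFn()

    // The helper parses the account ID and partition out of the caller ARN.
    accountID, partition, err := GetAccountIDAndPartitionFromSTSGetCallerIdentity(sts.New(sess))
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
    if accountID != stsResponse_GetCallerIdentity_valid_expectedAccountID {
        t.Errorf("expected account ID %q, got %q", stsResponse_GetCallerIdentity_valid_expectedAccountID, accountID)
    }
    if partition != stsResponse_GetCallerIdentity_valid_expectedPartition {
        t.Errorf("expected partition %q, got %q", stsResponse_GetCallerIdentity_valid_expectedPartition, partition)
    }
}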
@@ -2,7 +2,6 @@ package awsbase

 import (
     "crypto/tls"
-    "errors"
     "fmt"
     "log"
     "net/http"
@@ -28,45 +27,14 @@ func GetSessionOptions(c *Config) (*session.Options, error) {
         },
     }

+    // get and validate credentials
     creds, err := GetCredentials(c)
     if err != nil {
         return nil, err
     }

-    // Call Get to check for credential provider. If nothing found, we'll get an
-    // error, and we can present it nicely to the user
-    cp, err := creds.Get()
-    if err != nil {
-        if IsAWSErr(err, "NoCredentialProviders", "") {
-            // If a profile wasn't specified, the session may still be able to resolve credentials from shared config.
-            if c.Profile == "" {
-                sess, err := session.NewSession()
-                if err != nil {
-                    return nil, errors.New(`No valid credential sources found for AWS Provider.
-  Please see https://terraform.io/docs/providers/aws/index.html for more information on
-  providing credentials for the AWS Provider`)
-                }
-                _, err = sess.Config.Credentials.Get()
-                if err != nil {
-                    return nil, errors.New(`No valid credential sources found for AWS Provider.
-  Please see https://terraform.io/docs/providers/aws/index.html for more information on
-  providing credentials for the AWS Provider`)
-                }
-                log.Printf("[INFO] Using session-derived AWS Auth")
-                options.Config.Credentials = sess.Config.Credentials
-            } else {
-                log.Printf("[INFO] AWS Auth using Profile: %q", c.Profile)
-                options.Profile = c.Profile
-                options.SharedConfigState = session.SharedConfigEnable
-            }
-        } else {
-            return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
-        }
-    } else {
     // add the validated credentials to the session options
-        log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
     options.Config.Credentials = creds
-    }

     if c.Insecure {
         transport := options.Config.HTTPClient.Transport.(*http.Transport)
@@ -83,7 +51,7 @@ func GetSessionOptions(c *Config) (*session.Options, error) {
     return options, nil
 }

-// GetSession attempts to return valid AWS Go SDK session
+// GetSession attempts to return valid AWS Go SDK session.
 func GetSession(c *Config) (*session.Session, error) {
     options, err := GetSessionOptions(c)

@@ -94,9 +62,7 @@ func GetSession(c *Config) (*session.Session, error) {
     sess, err := session.NewSessionWithOptions(*options)
     if err != nil {
         if IsAWSErr(err, "NoCredentialProviders", "") {
-            return nil, errors.New(`No valid credential sources found for AWS Provider.
-  Please see https://terraform.io/docs/providers/aws/index.html for more information on
-  providing credentials for the AWS Provider`)
+            return nil, ErrNoValidCredentialSources
         }
         return nil, fmt.Errorf("Error creating AWS session: %s", err)
     }
@@ -138,7 +104,7 @@ func GetSession(c *Config) (*session.Session, error) {
     if !c.SkipCredsValidation {
         stsClient := sts.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.StsEndpoint)}))
         if _, _, err := GetAccountIDAndPartitionFromSTSGetCallerIdentity(stsClient); err != nil {
-            return nil, fmt.Errorf("error validating provider credentials: %s", err)
+            return nil, fmt.Errorf("error using credentials to get account ID: %s", err)
         }
     }

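GetSession now returns the sentinel ErrNoValidCredentialSources instead of a long inline error string, which lets callers attach guidance that fits their own context. A sketch of caller-side handling is below; the surrounding function, package name and wrapped message are assumptions, only the sentinel error and GetSession itself come from the diff.

package example

import (
    "fmt"

    awsbase "github.com/hashicorp/aws-sdk-go-base"
)

func newSession(cfg *awsbase.Config) error {
    sess, err := awsbase.GetSession(cfg)
    if err != nil {
        if err == awsbase.ErrNoValidCredentialSources {
            // Callers can now swap in wording appropriate to their own context
            // (provider vs. backend) rather than reusing the library's message.
            return fmt.Errorf("no valid credential sources for AWS were found: %w", err)
        }
        return err
    }
    _ = sess // hand the session to whatever needs it
    return nil
}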
@@ -24,7 +24,7 @@ func ValidateAccountID(accountID string, allowedAccountIDs, forbiddenAccountIDs
             }
         }

-        return fmt.Errorf("AWS Account ID not allowed: %s)", accountID)
+        return fmt.Errorf("AWS Account ID not allowed: %s", accountID)
     }

     return nil
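The one-character fix above removes a stray closing parenthesis from the error string produced by ValidateAccountID. A small usage sketch follows; the full signature is only partially visible in the hunk header, so the error-only return and the sample account IDs are assumptions.

package example

import (
    "log"

    awsbase "github.com/hashicorp/aws-sdk-go-base"
)

func checkAccount(accountID string) {
    allowed := []string{"222222222222"}
    if err := awsbase.ValidateAccountID(accountID, allowed, nil); err != nil {
        // With the fix above this now reads, for example,
        // "AWS Account ID not allowed: 111111111111" with no trailing ")".
        log.Printf("[WARN] %s", err)
    }
}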
@@ -1,99 +0,0 @@
-// This is a 'go generate'-oriented program for producing the "Variables"
-// method on every Expression implementation found within this package.
-// All expressions share the same implementation for this method, which
-// just wraps the package-level function "Variables" and uses an AST walk
-// to do its work.
-
-// +build ignore
-
-package main
-
-import (
-    "fmt"
-    "go/ast"
-    "go/parser"
-    "go/token"
-    "os"
-    "sort"
-)
-
-func main() {
-    fs := token.NewFileSet()
-    pkgs, err := parser.ParseDir(fs, ".", nil, 0)
-    if err != nil {
-        fmt.Fprintf(os.Stderr, "error while parsing: %s\n", err)
-        os.Exit(1)
-    }
-    pkg := pkgs["hclsyntax"]
-
-    // Walk all the files and collect the receivers of any "Value" methods
-    // that look like they are trying to implement Expression.
-    var recvs []string
-    for _, f := range pkg.Files {
-        for _, decl := range f.Decls {
-            fd, ok := decl.(*ast.FuncDecl)
-            if !ok {
-                continue
-            }
-            if fd.Name.Name != "Value" {
-                continue
-            }
-            results := fd.Type.Results.List
-            if len(results) != 2 {
-                continue
-            }
-            valResult := fd.Type.Results.List[0].Type.(*ast.SelectorExpr).X.(*ast.Ident)
-            diagsResult := fd.Type.Results.List[1].Type.(*ast.SelectorExpr).X.(*ast.Ident)
-
-            if valResult.Name != "cty" && diagsResult.Name != "hcl" {
-                continue
-            }
-
-            // If we have a method called Value and it returns something in
-            // "cty" followed by something in "hcl" then that's specific enough
-            // for now, even though this is not 100% exact as a correct
-            // implementation of Value.
-
-            recvTy := fd.Recv.List[0].Type
-
-            switch rtt := recvTy.(type) {
-            case *ast.StarExpr:
-                name := rtt.X.(*ast.Ident).Name
-                recvs = append(recvs, fmt.Sprintf("*%s", name))
-            default:
-                fmt.Fprintf(os.Stderr, "don't know what to do with a %T receiver\n", recvTy)
-            }
-
-        }
-    }
-
-    sort.Strings(recvs)
-
-    of, err := os.OpenFile("expression_vars.go", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
-    if err != nil {
-        fmt.Fprintf(os.Stderr, "failed to open output file: %s\n", err)
-        os.Exit(1)
-    }
-
-    fmt.Fprint(of, outputPreamble)
-    for _, recv := range recvs {
-        fmt.Fprintf(of, outputMethodFmt, recv)
-    }
-    fmt.Fprint(of, "\n")
-
-}
-
-const outputPreamble = `package hclsyntax
-
-// Generated by expression_vars_get.go. DO NOT EDIT.
-// Run 'go generate' on this package to update the set of functions here.
-
-import (
-    "github.com/hashicorp/hcl/v2"
-)`
-
-const outputMethodFmt = `
-
-func (e %s) Variables() []hcl.Traversal {
-    return Variables(e)
-}`
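For reference, the generator deleted above used to emit an expression_vars.go of the following shape, reconstructed from its own outputPreamble and outputMethodFmt; the two receiver types shown are illustrative hclsyntax expressions rather than the full generated list.

package hclsyntax

// Generated by expression_vars_get.go. DO NOT EDIT.
// Run 'go generate' on this package to update the set of functions here.

import (
    "github.com/hashicorp/hcl/v2"
)

func (e *BinaryOpExpr) Variables() []hcl.Traversal {
    return Variables(e)
}

func (e *ScopeTraversalExpr) Variables() []hcl.Traversal {
    return Variables(e)
}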
@@ -1,99 +0,0 @@
-// This is a 'go generate'-oriented program for producing the "Variables"
-// method on every Expression implementation found within this package.
-// All expressions share the same implementation for this method, which
-// just wraps the package-level function "Variables" and uses an AST walk
-// to do its work.
-
-// +build ignore
-
-package main
-
-import (
-    "fmt"
-    "go/ast"
-    "go/parser"
-    "go/token"
-    "os"
-    "sort"
-)
-
-func main() {
-    fs := token.NewFileSet()
-    pkgs, err := parser.ParseDir(fs, ".", nil, 0)
-    if err != nil {
-        fmt.Fprintf(os.Stderr, "error while parsing: %s\n", err)
-        os.Exit(1)
-    }
-    pkg := pkgs["hclsyntax"]
-
-    // Walk all the files and collect the receivers of any "Value" methods
-    // that look like they are trying to implement Expression.
-    var recvs []string
-    for _, f := range pkg.Files {
-        for _, decl := range f.Decls {
-            fd, ok := decl.(*ast.FuncDecl)
-            if !ok {
-                continue
-            }
-            if fd.Name.Name != "Value" {
-                continue
-            }
-            results := fd.Type.Results.List
-            if len(results) != 2 {
-                continue
-            }
-            valResult := fd.Type.Results.List[0].Type.(*ast.SelectorExpr).X.(*ast.Ident)
-            diagsResult := fd.Type.Results.List[1].Type.(*ast.SelectorExpr).X.(*ast.Ident)
-
-            if valResult.Name != "cty" && diagsResult.Name != "hcl" {
-                continue
-            }
-
-            // If we have a method called Value and it returns something in
-            // "cty" followed by something in "hcl" then that's specific enough
-            // for now, even though this is not 100% exact as a correct
-            // implementation of Value.
-
-            recvTy := fd.Recv.List[0].Type
-
-            switch rtt := recvTy.(type) {
-            case *ast.StarExpr:
-                name := rtt.X.(*ast.Ident).Name
-                recvs = append(recvs, fmt.Sprintf("*%s", name))
-            default:
-                fmt.Fprintf(os.Stderr, "don't know what to do with a %T receiver\n", recvTy)
-            }
-
-        }
-    }
-
-    sort.Strings(recvs)
-
-    of, err := os.OpenFile("expression_vars.go", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
-    if err != nil {
-        fmt.Fprintf(os.Stderr, "failed to open output file: %s\n", err)
-        os.Exit(1)
-    }
-
-    fmt.Fprint(of, outputPreamble)
-    for _, recv := range recvs {
-        fmt.Fprintf(of, outputMethodFmt, recv)
-    }
-    fmt.Fprint(of, "\n")
-
-}
-
-const outputPreamble = `package hclsyntax
-
-// Generated by expression_vars_get.go. DO NOT EDIT.
-// Run 'go generate' on this package to update the set of functions here.
-
-import (
-    "github.com/hashicorp/hcl2/hcl"
-)`
-
-const outputMethodFmt = `
-
-func (e %s) Variables() []hcl.Traversal {
-    return Variables(e)
-}`
@@ -1,93 +0,0 @@
-// +build ignore
-
-// Generate the table of OID values
-// Run with 'go run gen.go'.
-package main
-
-import (
-    "database/sql"
-    "fmt"
-    "log"
-    "os"
-    "os/exec"
-    "strings"
-
-    _ "github.com/lib/pq"
-)
-
-// OID represent a postgres Object Identifier Type.
-type OID struct {
-    ID   int
-    Type string
-}
-
-// Name returns an upper case version of the oid type.
-func (o OID) Name() string {
-    return strings.ToUpper(o.Type)
-}
-
-func main() {
-    datname := os.Getenv("PGDATABASE")
-    sslmode := os.Getenv("PGSSLMODE")
-
-    if datname == "" {
-        os.Setenv("PGDATABASE", "pqgotest")
-    }
-
-    if sslmode == "" {
-        os.Setenv("PGSSLMODE", "disable")
-    }
-
-    db, err := sql.Open("postgres", "")
-    if err != nil {
-        log.Fatal(err)
-    }
-    rows, err := db.Query(`
-        SELECT typname, oid
-        FROM pg_type WHERE oid < 10000
-        ORDER BY oid;
-    `)
-    if err != nil {
-        log.Fatal(err)
-    }
-    oids := make([]*OID, 0)
-    for rows.Next() {
-        var oid OID
-        if err = rows.Scan(&oid.Type, &oid.ID); err != nil {
-            log.Fatal(err)
-        }
-        oids = append(oids, &oid)
-    }
-    if err = rows.Err(); err != nil {
-        log.Fatal(err)
-    }
-    cmd := exec.Command("gofmt")
-    cmd.Stderr = os.Stderr
-    w, err := cmd.StdinPipe()
-    if err != nil {
-        log.Fatal(err)
-    }
-    f, err := os.Create("types.go")
-    if err != nil {
-        log.Fatal(err)
-    }
-    cmd.Stdout = f
-    err = cmd.Start()
-    if err != nil {
-        log.Fatal(err)
-    }
-    fmt.Fprintln(w, "// Code generated by gen.go. DO NOT EDIT.")
-    fmt.Fprintln(w, "\npackage oid")
-    fmt.Fprintln(w, "const (")
-    for _, oid := range oids {
-        fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID)
-    }
-    fmt.Fprintln(w, ")")
-    fmt.Fprintln(w, "var TypeName = map[Oid]string{")
-    for _, oid := range oids {
-        fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name())
-    }
-    fmt.Fprintln(w, "}")
-    w.Close()
-    cmd.Wait()
-}
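The deleted gen.go above queried pg_type and piped a generated types.go through gofmt. Pieced together from its Fprint calls, the output had roughly the following shape; the Oid definition shown here (declared elsewhere in the real package) and the two specific OIDs are illustrative assumptions, not taken from the diff.

// Code generated by gen.go. DO NOT EDIT.

package oid

// Oid mirrors PostgreSQL's object identifier type; included only so the
// sketch is self-contained.
type Oid uint32

const (
    T_bool Oid = 16
    T_text Oid = 25
)

var TypeName = map[Oid]string{
    T_bool: "BOOL",
    T_text: "TEXT",
}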
@ -1,508 +0,0 @@
|
||||||
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
package codec
|
|
||||||
|
|
||||||
import "reflect"
|
|
||||||
|
|
||||||
/*
|
|
||||||
|
|
||||||
A strict Non-validating namespace-aware XML 1.0 parser and (en|de)coder.
|
|
||||||
|
|
||||||
We are attempting this due to perceived issues with encoding/xml:
|
|
||||||
- Complicated. It tried to do too much, and is not as simple to use as json.
|
|
||||||
- Due to over-engineering, reflection is over-used AND performance suffers:
|
|
||||||
java is 6X faster:http://fabsk.eu/blog/category/informatique/dev/golang/
|
|
||||||
even PYTHON performs better: http://outgoing.typepad.com/outgoing/2014/07/exploring-golang.html
|
|
||||||
|
|
||||||
codec framework will offer the following benefits
|
|
||||||
- VASTLY improved performance (when using reflection-mode or codecgen)
|
|
||||||
- simplicity and consistency: with the rest of the supported formats
|
|
||||||
- all other benefits of codec framework (streaming, codegeneration, etc)
|
|
||||||
|
|
||||||
codec is not a drop-in replacement for encoding/xml.
|
|
||||||
It is a replacement, based on the simplicity and performance of codec.
|
|
||||||
Look at it like JAXB for Go.
|
|
||||||
|
|
||||||
Challenges:
|
|
||||||
- Need to output XML preamble, with all namespaces at the right location in the output.
|
|
||||||
- Each "end" block is dynamic, so we need to maintain a context-aware stack
|
|
||||||
- How to decide when to use an attribute VS an element
|
|
||||||
- How to handle chardata, attr, comment EXPLICITLY.
|
|
||||||
- Should it output fragments?
|
|
||||||
e.g. encoding a bool should just output true OR false, which is not well-formed XML.
|
|
||||||
|
|
||||||
Extend the struct tag. See representative example:
|
|
||||||
type X struct {
|
|
||||||
ID uint8 `codec:"http://ugorji.net/x-namespace xid id,omitempty,toarray,attr,cdata"`
|
|
||||||
// format: [namespace-uri ][namespace-prefix ]local-name, ...
|
|
||||||
}
|
|
||||||
|
|
||||||
Based on this, we encode
|
|
||||||
- fields as elements, BUT
|
|
||||||
encode as attributes if struct tag contains ",attr" and is a scalar (bool, number or string)
|
|
||||||
- text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata".
|
|
||||||
|
|
||||||
To handle namespaces:
|
|
||||||
- XMLHandle is denoted as being namespace-aware.
|
|
||||||
Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name.
|
|
||||||
- *Encoder and *Decoder know whether the Handle "prefers" namespaces.
|
|
||||||
- add *Encoder.getEncName(*structFieldInfo).
|
|
||||||
No one calls *structFieldInfo.indexForEncName directly anymore
|
|
||||||
- OR better yet: indexForEncName is namespace-aware, and helper.go is all namespace-aware
|
|
||||||
indexForEncName takes a parameter of the form namespace:local-name OR local-name
|
|
||||||
- add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc
|
|
||||||
by being a method on *Decoder, or maybe a method on the Handle itself.
|
|
||||||
No one accesses .encName anymore
|
|
||||||
- let encode.go and decode.go use these (for consistency)
|
|
||||||
- only problem exists for gen.go, where we create a big switch on encName.
|
|
||||||
Now, we also have to add a switch on strings.endsWith(kName, encNsName)
|
|
||||||
- gen.go will need to have many more methods, and then double-on the 2 switch loops like:
|
|
||||||
switch k {
|
|
||||||
case "abc" : x.abc()
|
|
||||||
case "def" : x.def()
|
|
||||||
default {
|
|
||||||
switch {
|
|
||||||
case !nsAware: panic(...)
|
|
||||||
case strings.endsWith(":abc"): x.abc()
|
|
||||||
case strings.endsWith(":def"): x.def()
|
|
||||||
default: panic(...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
The structure below accommodates this:
|
|
||||||
|
|
||||||
type typeInfo struct {
|
|
||||||
sfi []*structFieldInfo // sorted by encName
|
|
||||||
sfins // sorted by namespace
|
|
||||||
sfia // sorted, to have those with attributes at the top. Needed to write XML appropriately.
|
|
||||||
sfip // unsorted
|
|
||||||
}
|
|
||||||
type structFieldInfo struct {
|
|
||||||
encName
|
|
||||||
nsEncName
|
|
||||||
ns string
|
|
||||||
attr bool
|
|
||||||
cdata bool
|
|
||||||
}
|
|
||||||
|
|
||||||
indexForEncName is now an internal helper function that takes a sorted array
|
|
||||||
(one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...)
|
|
||||||
|
|
||||||
There will be a separate parser from the builder.
|
|
||||||
The parser will have a method: next() xmlToken method. It has lookahead support,
|
|
||||||
so you can pop multiple tokens, make a determination, and push them back in the order popped.
|
|
||||||
This will be needed to determine whether we are "nakedly" decoding a container or not.
|
|
||||||
The stack will be implemented using a slice and push/pop happens at the [0] element.
|
|
||||||
|
|
||||||
xmlToken has fields:
|
|
||||||
- type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text
|
|
||||||
- value string
|
|
||||||
- ns string
|
|
||||||
|
|
||||||
SEE: http://www.xml.com/pub/a/98/10/guide0.html?page=3#ENTDECL
|
|
||||||
|
|
||||||
The following are skipped when parsing:
|
|
||||||
- External Entities (from external file)
|
|
||||||
- Notation Declaration e.g. <!NOTATION GIF87A SYSTEM "GIF">
|
|
||||||
- Entity Declarations & References
|
|
||||||
- XML Declaration (assume UTF-8)
|
|
||||||
- XML Directive i.e. <! ... >
|
|
||||||
- Other Declarations: Notation, etc.
|
|
||||||
- Comment
|
|
||||||
- Processing Instruction
|
|
||||||
- schema / DTD for validation:
|
|
||||||
We are not a VALIDATING parser. Validation is done elsewhere.
|
|
||||||
However, some parts of the DTD internal subset are used (SEE BELOW).
|
|
||||||
For Attribute List Declarations e.g.
|
|
||||||
<!ATTLIST foo:oldjoke name ID #REQUIRED label CDATA #IMPLIED status ( funny | notfunny ) 'funny' >
|
|
||||||
We considered using the ATTLIST to get "default" value, but not to validate the contents. (VETOED)
|
|
||||||
|
|
||||||
The following XML features are supported
|
|
||||||
- Namespace
|
|
||||||
- Element
|
|
||||||
- Attribute
|
|
||||||
- cdata
|
|
||||||
- Unicode escape
|
|
||||||
|
|
||||||
The following DTD (when as an internal sub-set) features are supported:
|
|
||||||
- Internal Entities e.g.
|
|
||||||
<!ELEMENT burns "ugorji is cool" > AND entities for the set: [<>&"']
|
|
||||||
- Parameter entities e.g.
|
|
||||||
<!ENTITY % personcontent "ugorji is cool"> <!ELEMENT burns (%personcontent;)*>
|
|
||||||
|
|
||||||
At decode time, a structure containing the following is kept
|
|
||||||
- namespace mapping
|
|
||||||
- default attribute values
|
|
||||||
- all internal entities (<>&"' and others written in the document)
|
|
||||||
|
|
||||||
When decode starts, it parses XML namespace declarations and creates a map in the
|
|
||||||
xmlDecDriver. While parsing, that map continuously gets updated.
|
|
||||||
The only problem happens when a namespace declaration happens on the node that it defines.
|
|
||||||
e.g. <hn:name xmlns:hn="http://www.ugorji.net" >
|
|
||||||
To handle this, each Element must be fully parsed at a time,
|
|
||||||
even if it amounts to multiple tokens which are returned one at a time on request.
|
|
||||||
|
|
||||||
xmlns is a special attribute name.
|
|
||||||
- It is used to define namespaces, including the default
|
|
||||||
- It is never returned as an AttrKey or AttrVal.
|
|
||||||
*We may decide later to allow user to use it e.g. you want to parse the xmlns mappings into a field.*
|
|
||||||
|
|
||||||
Number, bool, null, mapKey, etc can all be decoded from any xmlToken.
|
|
||||||
This accommodates map[int]string for example.
|
|
||||||
|
|
||||||
It should be possible to create a schema from the types,
|
|
||||||
or vice versa (generate types from schema with appropriate tags).
|
|
||||||
This is however out-of-scope from this parsing project.
|
|
||||||
|
|
||||||
We should write all namespace information at the first point that it is referenced in the tree,
|
|
||||||
and use the mapping for all child nodes and attributes. This means that state is maintained
|
|
||||||
at a point in the tree. This also means that calls to Decode or MustDecode will reset some state.
|
|
||||||
|
|
||||||
When decoding, it is important to keep track of entity references and default attribute values.
|
|
||||||
It seems these can only be stored in the DTD components. We should honor them when decoding.
|
|
||||||
|
|
||||||
Configuration for XMLHandle will look like this:
|
|
||||||
|
|
||||||
XMLHandle
|
|
||||||
DefaultNS string
|
|
||||||
// Encoding:
|
|
||||||
NS map[string]string // ns URI to key, used for encoding
|
|
||||||
// Decoding: in case ENTITY declared in external schema or dtd, store info needed here
|
|
||||||
Entities map[string]string // map of entity rep to character
|
|
||||||
|
|
||||||
|
|
||||||
During encode, if a namespace mapping is not defined for a namespace found on a struct,
|
|
||||||
then we create a mapping for it using nsN (where N is 1..1000000, and doesn't conflict
|
|
||||||
with any other namespace mapping).
|
|
||||||
|
|
||||||
Note that different fields in a struct can have different namespaces.
|
|
||||||
However, all fields will default to the namespace on the _struct field (if defined).
|
|
||||||
|
|
||||||
An XML document is a name, a map of attributes and a list of children.
|
|
||||||
Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example).
|
|
||||||
We have to "DecodeNaked" into something that resembles XML data.
|
|
||||||
|
|
||||||
To support DecodeNaked (decode into nil interface{}), we have to define some "supporting" types:
|
|
||||||
type Name struct { // Preferred. Less allocations due to conversions.
|
|
||||||
Local string
|
|
||||||
Space string
|
|
||||||
}
|
|
||||||
type Element struct {
|
|
||||||
Name Name
|
|
||||||
Attrs map[Name]string
|
|
||||||
Children []interface{} // each child is either *Element or string
|
|
||||||
}
|
|
||||||
Only two "supporting" types are exposed for XML: Name and Element.
|
|
||||||
|
|
||||||
// ------------------
|
|
||||||
|
|
||||||
We considered 'type Name string' where Name is like "Space Local" (space-separated).
|
|
||||||
We decided against it, because each creation of a name would lead to
|
|
||||||
double allocation (first convert []byte to string, then concatenate them into a string).
|
|
||||||
The benefit is that it is faster to read Attrs from a map. But given that Element is a value
|
|
||||||
object, we want to eschew methods and have public exposed variables.
|
|
||||||
|
|
||||||
We also considered the following, where xml types were not value objects, and we used
|
|
||||||
intelligent accessor methods to extract information and for performance.
|
|
||||||
*** WE DECIDED AGAINST THIS. ***
|
|
||||||
type Attr struct {
|
|
||||||
Name Name
|
|
||||||
Value string
|
|
||||||
}
|
|
||||||
// Element is a ValueObject: There are no accessor methods.
|
|
||||||
// Make element self-contained.
|
|
||||||
type Element struct {
|
|
||||||
Name Name
|
|
||||||
attrsMap map[string]string // where key is "Space Local"
|
|
||||||
attrs []Attr
|
|
||||||
childrenT []string
|
|
||||||
childrenE []Element
|
|
||||||
childrenI []int // each child is a index into T or E.
|
|
||||||
}
|
|
||||||
func (x *Element) child(i) interface{} // returns string or *Element
|
|
||||||
|
|
||||||
// ------------------
|
|
||||||
|
|
||||||
Per XML spec and our default handling, white space is always treated as
|
|
||||||
insignificant between elements, except in a text node. The xml:space='preserve'
|
|
||||||
attribute is ignored.
|
|
||||||
|
|
||||||
**Note: there is no xml: namespace. The xml: attributes were defined before namespaces.**
|
|
||||||
**So treat them as just "directives" that should be interpreted to mean something**.
|
|
||||||
|
|
||||||
On encoding, we support indenting aka prettifying markup in the same way we support it for json.
|
|
||||||
|
|
||||||
A document or element can only be encoded/decoded from/to a struct. In this mode:
|
|
||||||
- struct name maps to element name (or tag-info from _struct field)
|
|
||||||
- fields are mapped to child elements or attributes
|
|
||||||
|
|
||||||
A map is either encoded as attributes on current element, or as a set of child elements.
|
|
||||||
Maps are encoded as attributes iff their keys and values are primitives (number, bool, string).
|
|
||||||
|
|
||||||
A list is encoded as a set of child elements.
|
|
||||||
|
|
||||||
Primitives (number, bool, string) are encoded as an element, attribute or text
|
|
||||||
depending on the context.
|
|
||||||
|
|
||||||
Extensions must encode themselves as a text string.
|
|
||||||
|
|
||||||
Encoding is tough, specifically when encoding mappings, because we need to encode
|
|
||||||
as either attribute or element. To do this, we need to default to encoding as attributes,
|
|
||||||
and then let Encoder inform the Handle when to start encoding as nodes.
|
|
||||||
i.e. Encoder does something like:
|
|
||||||
|
|
||||||
h.EncodeMapStart()
|
|
||||||
h.Encode(), h.Encode(), ...
|
|
||||||
h.EncodeMapNotAttrSignal() // this is not a bool, because it's a signal
|
|
||||||
h.Encode(), h.Encode(), ...
|
|
||||||
h.EncodeEnd()
|
|
||||||
|
|
||||||
Only XMLHandle understands this, and will set itself to start encoding as elements.
|
|
||||||
|
|
||||||
This support extends to maps. For example, if a struct field is a map, and it has
|
|
||||||
the struct tag signifying it should be attr, then all its fields are encoded as attributes.
|
|
||||||
e.g.
|
|
||||||
|
|
||||||
type X struct {
|
|
||||||
M map[string]int `codec:"m,attr"` // encode keys as attributes named
|
|
||||||
}
|
|
||||||
|
|
||||||
Question:
|
|
||||||
- if encoding a map, what if map keys have spaces in them???
|
|
||||||
Then they cannot be attributes or child elements. Error.
|
|
||||||
|
|
||||||
Options to consider adding later:
|
|
||||||
- For attribute values, normalize by trimming beginning and ending white space,
|
|
||||||
and converting every white space sequence to a single space.
|
|
||||||
- ATTLIST restrictions are enforced.
|
|
||||||
e.g. default value of xml:space, skipping xml:XYZ style attributes, etc.
|
|
||||||
- Consider supporting NON-STRICT mode (e.g. to handle HTML parsing).
|
|
||||||
Some elements e.g. br, hr, etc need not close and should be auto-closed
|
|
||||||
... (see http://www.w3.org/TR/html4/loose.dtd)
|
|
||||||
An expansive set of entities are pre-defined.
|
|
||||||
- Have easy way to create a HTML parser:
|
|
||||||
add a HTML() method to XMLHandle, that will set Strict=false, specify AutoClose,
|
|
||||||
and add HTML Entities to the list.
|
|
||||||
- Support validating element/attribute XMLName before writing it.
|
|
||||||
Keep this behind a flag, which is set to false by default (for performance).
|
|
||||||
type XMLHandle struct {
|
|
||||||
CheckName bool
|
|
||||||
}
|
|
||||||
|
|
||||||
Misc:
|
|
||||||
|
|
||||||
ROADMAP (1 weeks):
|
|
||||||
- build encoder (1 day)
|
|
||||||
- build decoder (based off xmlParser) (1 day)
|
|
||||||
- implement xmlParser (2 days).
|
|
||||||
Look at encoding/xml for inspiration.
|
|
||||||
- integrate and TEST (1 days)
|
|
||||||
- write article and post it (1 day)
|
|
||||||
|
|
||||||
// ---------- MORE NOTES FROM 2017-11-30 ------------
|
|
||||||
|
|
||||||
when parsing
|
|
||||||
- parse the attributes first
|
|
||||||
- then parse the nodes
|
|
||||||
|
|
||||||
basically:
|
|
||||||
- if encoding a field: we use the field name for the wrapper
|
|
||||||
- if encoding a non-field, then just use the element type name
|
|
||||||
|
|
||||||
map[string]string ==> <map><key>abc</key><value>val</value></map>... or
|
|
||||||
<map key="abc">val</map>... OR
|
|
||||||
<key1>val1</key1><key2>val2</key2>... <- PREFERED
|
|
||||||
[]string ==> <string>v1</string><string>v2</string>...
|
|
||||||
string v1 ==> <string>v1</string>
|
|
||||||
bool true ==> <bool>true</bool>
|
|
||||||
float 1.0 ==> <float>1.0</float>
|
|
||||||
...
|
|
||||||
|
|
||||||
F1 map[string]string ==> <F1><key>abc</key><value>val</value></F1>... OR
|
|
||||||
<F1 key="abc">val</F1>... OR
|
|
||||||
<F1><abc>val</abc>...</F1> <- PREFERED
|
|
||||||
F2 []string ==> <F2>v1</F2><F2>v2</F2>...
|
|
||||||
F3 bool ==> <F3>true</F3>
|
|
||||||
...
|
|
||||||
|
|
||||||
- a scalar is encoded as:
|
|
||||||
(value) of type T ==> <T><value/></T>
|
|
||||||
(value) of field F ==> <F><value/></F>
|
|
||||||
- A kv-pair is encoded as:
|
|
||||||
(key,value) ==> <map><key><value/></key></map> OR <map key="value">
|
|
||||||
(key,value) of field F ==> <F><key><value/></key></F> OR <F key="value">
|
|
||||||
- A map or struct is just a list of kv-pairs
|
|
||||||
- A list is encoded as sequences of same node e.g.
|
|
||||||
<F1 key1="value11">
|
|
||||||
<F1 key2="value12">
|
|
||||||
<F2>value21</F2>
|
|
||||||
<F2>value22</F2>
|
|
||||||
- we may have to singularize the field name, when entering into xml,
|
|
||||||
and pluralize them when encoding.
|
|
||||||
- bi-directional encode->decode->encode is not a MUST.
|
|
||||||
even encoding/xml cannot decode correctly what was encoded:
|
|
||||||
|
|
||||||
see https://play.golang.org/p/224V_nyhMS
|
|
||||||
func main() {
|
|
||||||
fmt.Println("Hello, playground")
|
|
||||||
v := []interface{}{"hello", 1, true, nil, time.Now()}
|
|
||||||
s, err := xml.Marshal(v)
|
|
||||||
fmt.Printf("err: %v, \ns: %s\n", err, s)
|
|
||||||
var v2 []interface{}
|
|
||||||
err = xml.Unmarshal(s, &v2)
|
|
||||||
fmt.Printf("err: %v, \nv2: %v\n", err, v2)
|
|
||||||
type T struct {
|
|
||||||
V []interface{}
|
|
||||||
}
|
|
||||||
v3 := T{V: v}
|
|
||||||
s, err = xml.Marshal(v3)
|
|
||||||
fmt.Printf("err: %v, \ns: %s\n", err, s)
|
|
||||||
var v4 T
|
|
||||||
err = xml.Unmarshal(s, &v4)
|
|
||||||
fmt.Printf("err: %v, \nv4: %v\n", err, v4)
|
|
||||||
}
|
|
||||||
Output:
|
|
||||||
err: <nil>,
|
|
||||||
s: <string>hello</string><int>1</int><bool>true</bool><Time>2009-11-10T23:00:00Z</Time>
|
|
||||||
err: <nil>,
|
|
||||||
v2: [<nil>]
|
|
||||||
err: <nil>,
|
|
||||||
s: <T><V>hello</V><V>1</V><V>true</V><V>2009-11-10T23:00:00Z</V></T>
|
|
||||||
err: <nil>,
|
|
||||||
v4: {[<nil> <nil> <nil> <nil>]}
|
|
||||||
-
|
|
||||||
*/
|
|
||||||
|
|
||||||
// ----------- PARSER -------------------
|
|
||||||
|
|
||||||
type xmlTokenType uint8
|
|
||||||
|
|
||||||
const (
|
|
||||||
_ xmlTokenType = iota << 1
|
|
||||||
xmlTokenElemStart
|
|
||||||
xmlTokenElemEnd
|
|
||||||
xmlTokenAttrKey
|
|
||||||
xmlTokenAttrVal
|
|
||||||
xmlTokenText
|
|
||||||
)
|
|
||||||
|
|
||||||
type xmlToken struct {
|
|
||||||
Type xmlTokenType
|
|
||||||
Value string
|
|
||||||
Namespace string // blank for AttrVal and Text
|
|
||||||
}
|
|
||||||
|
|
||||||
type xmlParser struct {
|
|
||||||
r decReader
|
|
||||||
toks []xmlToken // list of tokens.
|
|
||||||
ptr int // ptr into the toks slice
|
|
||||||
done bool // nothing else to parse. r now returns EOF.
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *xmlParser) next() (t *xmlToken) {
|
|
||||||
// once x.done, or x.ptr == len(x.toks) == 0, then return nil (to signify finish)
|
|
||||||
if !x.done && len(x.toks) == 0 {
|
|
||||||
x.nextTag()
|
|
||||||
}
|
|
||||||
// parses one element at a time (into possible many tokens)
|
|
||||||
if x.ptr < len(x.toks) {
|
|
||||||
t = &(x.toks[x.ptr])
|
|
||||||
x.ptr++
|
|
||||||
if x.ptr == len(x.toks) {
|
|
||||||
x.ptr = 0
|
|
||||||
x.toks = x.toks[:0]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// nextTag will parses the next element and fill up toks.
|
|
||||||
// It set done flag if/once EOF is reached.
|
|
||||||
func (x *xmlParser) nextTag() {
|
|
||||||
// TODO: implement.
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------- ENCODER -------------------
|
|
||||||
|
|
||||||
type xmlEncDriver struct {
|
|
||||||
e *Encoder
|
|
||||||
w encWriter
|
|
||||||
h *XMLHandle
|
|
||||||
b [64]byte // scratch
|
|
||||||
bs []byte // scratch
|
|
||||||
// s jsonStack
|
|
||||||
noBuiltInTypes
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------- DECODER -------------------
|
|
||||||
|
|
||||||
type xmlDecDriver struct {
|
|
||||||
d *Decoder
|
|
||||||
h *XMLHandle
|
|
||||||
r decReader // *bytesDecReader decReader
|
|
||||||
ct valueType // container type. one of unset, array or map.
|
|
||||||
bstr [8]byte // scratch used for string \UXXX parsing
|
|
||||||
b [64]byte // scratch
|
|
||||||
|
|
||||||
// wsSkipped bool // whitespace skipped
|
|
||||||
|
|
||||||
// s jsonStack
|
|
||||||
|
|
||||||
noBuiltInTypes
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeNaked will decode into an XMLNode
|
|
||||||
|
|
||||||
// XMLName is a value object representing a namespace-aware NAME
|
|
||||||
type XMLName struct {
|
|
||||||
Local string
|
|
||||||
Space string
|
|
||||||
}
|
|
||||||
|
|
||||||
// XMLNode represents a "union" of the different types of XML Nodes.
|
|
||||||
// Only one of fields (Text or *Element) is set.
|
|
||||||
type XMLNode struct {
|
|
||||||
Element *Element
|
|
||||||
Text string
|
|
||||||
}
|
|
||||||
|
|
||||||
// XMLElement is a value object representing an fully-parsed XML element.
|
|
||||||
type XMLElement struct {
|
|
||||||
Name Name
|
|
||||||
Attrs map[XMLName]string
|
|
||||||
// Children is a list of child nodes, each being a *XMLElement or string
|
|
||||||
Children []XMLNode
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------- HANDLE -------------------
|
|
||||||
|
|
||||||
type XMLHandle struct {
|
|
||||||
BasicHandle
|
|
||||||
textEncodingType
|
|
||||||
|
|
||||||
DefaultNS string
|
|
||||||
NS map[string]string // ns URI to key, for encoding
|
|
||||||
Entities map[string]string // entity representation to string, for encoding.
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *XMLHandle) newEncDriver(e *Encoder) encDriver {
|
|
||||||
return &xmlEncDriver{e: e, w: e.w, h: h}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *XMLHandle) newDecDriver(d *Decoder) decDriver {
|
|
||||||
// d := xmlDecDriver{r: r.(*bytesDecReader), h: h}
|
|
||||||
hd := xmlDecDriver{d: d, r: d.r, h: h}
|
|
||||||
hd.n.bytes = d.b[:]
|
|
||||||
return &hd
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
|
|
||||||
return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ decDriver = (*xmlDecDriver)(nil)
|
|
||||||
var _ encDriver = (*xmlEncDriver)(nil)
|
|
|
@@ -1,40 +0,0 @@
-// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
-    "bytes"
-    "io"
-    "log"
-    "os"
-
-    "github.com/ulikunitz/xz"
-)
-
-func main() {
-    const text = "The quick brown fox jumps over the lazy dog.\n"
-    var buf bytes.Buffer
-    // compress text
-    w, err := xz.NewWriter(&buf)
-    if err != nil {
-        log.Fatalf("xz.NewWriter error %s", err)
-    }
-    if _, err := io.WriteString(w, text); err != nil {
-        log.Fatalf("WriteString error %s", err)
-    }
-    if err := w.Close(); err != nil {
-        log.Fatalf("w.Close error %s", err)
-    }
-    // decompress buffer and write output to stdout
-    r, err := xz.NewReader(&buf)
-    if err != nil {
-        log.Fatalf("NewReader error %s", err)
-    }
-    if _, err = io.Copy(os.Stdout, r); err != nil {
-        log.Fatalf("io.Copy error %s", err)
-    }
-}
@ -1,712 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
//go:generate go run gen.go
|
|
||||||
//go:generate go run gen.go -test
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"go/format"
|
|
||||||
"io/ioutil"
|
|
||||||
"math/rand"
|
|
||||||
"os"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// identifier converts s to a Go exported identifier.
|
|
||||||
// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
|
|
||||||
func identifier(s string) string {
|
|
||||||
b := make([]byte, 0, len(s))
|
|
||||||
cap := true
|
|
||||||
for _, c := range s {
|
|
||||||
if c == '-' {
|
|
||||||
cap = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if cap && 'a' <= c && c <= 'z' {
|
|
||||||
c -= 'a' - 'A'
|
|
||||||
}
|
|
||||||
cap = false
|
|
||||||
b = append(b, byte(c))
|
|
||||||
}
|
|
||||||
return string(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
var test = flag.Bool("test", false, "generate table_test.go")
|
|
||||||
|
|
||||||
func genFile(name string, buf *bytes.Buffer) {
|
|
||||||
b, err := format.Source(buf.Bytes())
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if err := ioutil.WriteFile(name, b, 0644); err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
var all []string
|
|
||||||
all = append(all, elements...)
|
|
||||||
all = append(all, attributes...)
|
|
||||||
all = append(all, eventHandlers...)
|
|
||||||
all = append(all, extra...)
|
|
||||||
sort.Strings(all)
|
|
||||||
|
|
||||||
// uniq - lists have dups
|
|
||||||
w := 0
|
|
||||||
for _, s := range all {
|
|
||||||
if w == 0 || all[w-1] != s {
|
|
||||||
all[w] = s
|
|
||||||
w++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
all = all[:w]
|
|
||||||
|
|
||||||
if *test {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
|
|
||||||
fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n")
|
|
||||||
fmt.Fprintln(&buf, "package atom\n")
|
|
||||||
fmt.Fprintln(&buf, "var testAtomList = []string{")
|
|
||||||
for _, s := range all {
|
|
||||||
fmt.Fprintf(&buf, "\t%q,\n", s)
|
|
||||||
}
|
|
||||||
fmt.Fprintln(&buf, "}")
|
|
||||||
|
|
||||||
genFile("table_test.go", &buf)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find hash that minimizes table size.
|
|
||||||
var best *table
|
|
||||||
for i := 0; i < 1000000; i++ {
|
|
||||||
if best != nil && 1<<(best.k-1) < len(all) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
h := rand.Uint32()
|
|
||||||
for k := uint(0); k <= 16; k++ {
|
|
||||||
if best != nil && k >= best.k {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
var t table
|
|
||||||
if t.init(h, k, all) {
|
|
||||||
best = &t
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if best == nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "failed to construct string table\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Lay out strings, using overlaps when possible.
|
|
||||||
layout := append([]string{}, all...)
|
|
||||||
|
|
||||||
// Remove strings that are substrings of other strings
|
|
||||||
for changed := true; changed; {
|
|
||||||
changed = false
|
|
||||||
for i, s := range layout {
|
|
||||||
if s == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for j, t := range layout {
|
|
||||||
if i != j && t != "" && strings.Contains(s, t) {
|
|
||||||
changed = true
|
|
||||||
layout[j] = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Join strings where one suffix matches another prefix.
|
|
||||||
for {
|
|
||||||
// Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
|
|
||||||
// maximizing overlap length k.
|
|
||||||
besti := -1
|
|
||||||
bestj := -1
|
|
||||||
bestk := 0
|
|
||||||
for i, s := range layout {
|
|
||||||
if s == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for j, t := range layout {
|
|
||||||
if i == j {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
|
|
||||||
if s[len(s)-k:] == t[:k] {
|
|
||||||
besti = i
|
|
||||||
bestj = j
|
|
||||||
bestk = k
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if bestk > 0 {
|
|
||||||
layout[besti] += layout[bestj][bestk:]
|
|
||||||
layout[bestj] = ""
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
text := strings.Join(layout, "")
|
|
||||||
|
|
||||||
atom := map[string]uint32{}
|
|
||||||
for _, s := range all {
|
|
||||||
off := strings.Index(text, s)
|
|
||||||
if off < 0 {
|
|
||||||
panic("lost string " + s)
|
|
||||||
}
|
|
||||||
atom[s] = uint32(off<<8 | len(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
// Generate the Go code.
|
|
||||||
fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
|
|
||||||
fmt.Fprintln(&buf, "//go:generate go run gen.go\n")
|
|
||||||
fmt.Fprintln(&buf, "package atom\n\nconst (")
|
|
||||||
|
|
||||||
// compute max len
|
|
||||||
maxLen := 0
|
|
||||||
for _, s := range all {
|
|
||||||
if maxLen < len(s) {
|
|
||||||
maxLen = len(s)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s])
|
|
||||||
}
|
|
||||||
fmt.Fprintln(&buf, ")\n")
|
|
||||||
|
|
||||||
fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0)
|
|
||||||
fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen)
|
|
||||||
|
|
||||||
fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k)
|
|
||||||
for i, s := range best.tab {
|
|
||||||
if s == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&buf, "}\n")
|
|
||||||
datasize := (1 << best.k) * 4
|
|
||||||
|
|
||||||
fmt.Fprintln(&buf, "const atomText =")
|
|
||||||
textsize := len(text)
|
|
||||||
for len(text) > 60 {
|
|
||||||
fmt.Fprintf(&buf, "\t%q +\n", text[:60])
|
|
||||||
text = text[60:]
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&buf, "\t%q\n\n", text)
|
|
||||||
|
|
||||||
genFile("table.go", &buf)
|
|
||||||
|
|
||||||
fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
|
|
||||||
}
|
|
||||||
|
|
||||||
type byLen []string
|
|
||||||
|
|
||||||
func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
|
|
||||||
func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
func (x byLen) Len() int { return len(x) }
|
|
||||||
|
|
||||||
// fnv computes the FNV hash with an arbitrary starting value h.
|
|
||||||
func fnv(h uint32, s string) uint32 {
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
h ^= uint32(s[i])
|
|
||||||
h *= 16777619
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
// A table represents an attempt at constructing the lookup table.
|
|
||||||
// The lookup table uses cuckoo hashing, meaning that each string
|
|
||||||
// can be found in one of two positions.
|
|
||||||
type table struct {
|
|
||||||
h0 uint32
|
|
||||||
k uint
|
|
||||||
mask uint32
|
|
||||||
tab []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// hash returns the two hashes for s.
|
|
||||||
func (t *table) hash(s string) (h1, h2 uint32) {
|
|
||||||
h := fnv(t.h0, s)
|
|
||||||
h1 = h & t.mask
|
|
||||||
h2 = (h >> 16) & t.mask
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// init initializes the table with the given parameters.
|
|
||||||
// h0 is the initial hash value,
|
|
||||||
// k is the number of bits of hash value to use, and
|
|
||||||
// x is the list of strings to store in the table.
|
|
||||||
// init returns false if the table cannot be constructed.
|
|
||||||
func (t *table) init(h0 uint32, k uint, x []string) bool {
|
|
||||||
t.h0 = h0
|
|
||||||
t.k = k
|
|
||||||
t.tab = make([]string, 1<<k)
|
|
||||||
t.mask = 1<<k - 1
|
|
||||||
for _, s := range x {
|
|
||||||
if !t.insert(s) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// insert inserts s in the table.
|
|
||||||
func (t *table) insert(s string) bool {
|
|
||||||
h1, h2 := t.hash(s)
|
|
||||||
if t.tab[h1] == "" {
|
|
||||||
t.tab[h1] = s
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if t.tab[h2] == "" {
|
|
||||||
t.tab[h2] = s
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if t.push(h1, 0) {
|
|
||||||
t.tab[h1] = s
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if t.push(h2, 0) {
|
|
||||||
t.tab[h2] = s
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// push attempts to push aside the entry in slot i.
|
|
||||||
func (t *table) push(i uint32, depth int) bool {
|
|
||||||
if depth > len(t.tab) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
s := t.tab[i]
|
|
||||||
h1, h2 := t.hash(s)
|
|
||||||
j := h1 + h2 - i
|
|
||||||
if t.tab[j] != "" && !t.push(j, depth+1) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
t.tab[j] = s
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// The lists of element names and attribute keys were taken from
|
|
||||||
// https://html.spec.whatwg.org/multipage/indices.html#index
|
|
||||||
// as of the "HTML Living Standard - Last Updated 16 April 2018" version.
|
|
||||||
|
|
||||||
// "command", "keygen" and "menuitem" have been removed from the spec,
|
|
||||||
// but are kept here for backwards compatibility.
|
|
||||||
var elements = []string{
|
|
||||||
"a",
|
|
||||||
"abbr",
|
|
||||||
"address",
|
|
||||||
"area",
|
|
||||||
"article",
|
|
||||||
"aside",
|
|
||||||
"audio",
|
|
||||||
"b",
|
|
||||||
"base",
|
|
||||||
"bdi",
|
|
||||||
"bdo",
|
|
||||||
"blockquote",
|
|
||||||
"body",
|
|
||||||
"br",
|
|
||||||
"button",
|
|
||||||
"canvas",
|
|
||||||
"caption",
|
|
||||||
"cite",
|
|
||||||
"code",
|
|
||||||
"col",
|
|
||||||
"colgroup",
|
|
||||||
"command",
|
|
||||||
"data",
|
|
||||||
"datalist",
|
|
||||||
"dd",
|
|
||||||
"del",
|
|
||||||
"details",
|
|
||||||
"dfn",
|
|
||||||
"dialog",
|
|
||||||
"div",
|
|
||||||
"dl",
|
|
||||||
"dt",
|
|
||||||
"em",
|
|
||||||
"embed",
|
|
||||||
"fieldset",
|
|
||||||
"figcaption",
|
|
||||||
"figure",
|
|
||||||
"footer",
|
|
||||||
"form",
|
|
||||||
"h1",
|
|
||||||
"h2",
|
|
||||||
"h3",
|
|
||||||
"h4",
|
|
||||||
"h5",
|
|
||||||
"h6",
|
|
||||||
"head",
|
|
||||||
"header",
|
|
||||||
"hgroup",
|
|
||||||
"hr",
|
|
||||||
"html",
|
|
||||||
"i",
|
|
||||||
"iframe",
|
|
||||||
"img",
|
|
||||||
"input",
|
|
||||||
"ins",
|
|
||||||
"kbd",
|
|
||||||
"keygen",
|
|
||||||
"label",
|
|
||||||
"legend",
|
|
||||||
"li",
|
|
||||||
"link",
|
|
||||||
"main",
|
|
||||||
"map",
|
|
||||||
"mark",
|
|
||||||
"menu",
|
|
||||||
"menuitem",
|
|
||||||
"meta",
|
|
||||||
"meter",
|
|
||||||
"nav",
|
|
||||||
"noscript",
|
|
||||||
"object",
|
|
||||||
"ol",
|
|
||||||
"optgroup",
|
|
||||||
"option",
|
|
||||||
"output",
|
|
||||||
"p",
|
|
||||||
"param",
|
|
||||||
"picture",
|
|
||||||
"pre",
|
|
||||||
"progress",
|
|
||||||
"q",
|
|
||||||
"rp",
|
|
||||||
"rt",
|
|
||||||
"ruby",
|
|
||||||
"s",
|
|
||||||
"samp",
|
|
||||||
"script",
|
|
||||||
"section",
|
|
||||||
"select",
|
|
||||||
"slot",
|
|
||||||
"small",
|
|
||||||
"source",
|
|
||||||
"span",
|
|
||||||
"strong",
|
|
||||||
"style",
|
|
||||||
"sub",
|
|
||||||
"summary",
|
|
||||||
"sup",
|
|
||||||
"table",
|
|
||||||
"tbody",
|
|
||||||
"td",
|
|
||||||
"template",
|
|
||||||
"textarea",
|
|
||||||
"tfoot",
|
|
||||||
"th",
|
|
||||||
"thead",
|
|
||||||
"time",
|
|
||||||
"title",
|
|
||||||
"tr",
|
|
||||||
"track",
|
|
||||||
"u",
|
|
||||||
"ul",
|
|
||||||
"var",
|
|
||||||
"video",
|
|
||||||
"wbr",
|
|
||||||
}
|
|
||||||
|
|
||||||
// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
|
|
||||||
//
|
|
||||||
// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup",
|
|
||||||
// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec,
|
|
||||||
// but are kept here for backwards compatibility.
|
|
||||||
var attributes = []string{
|
|
||||||
"abbr",
|
|
||||||
"accept",
|
|
||||||
"accept-charset",
|
|
||||||
"accesskey",
|
|
||||||
"action",
|
|
||||||
"allowfullscreen",
|
|
||||||
"allowpaymentrequest",
|
|
||||||
"allowusermedia",
|
|
||||||
"alt",
|
|
||||||
"as",
|
|
||||||
"async",
|
|
||||||
"autocomplete",
|
|
||||||
"autofocus",
|
|
||||||
"autoplay",
|
|
||||||
"challenge",
|
|
||||||
"charset",
|
|
||||||
"checked",
|
|
||||||
"cite",
|
|
||||||
"class",
|
|
||||||
"color",
|
|
||||||
"cols",
|
|
||||||
"colspan",
|
|
||||||
"command",
|
|
||||||
"content",
|
|
||||||
"contenteditable",
|
|
||||||
"contextmenu",
|
|
||||||
"controls",
|
|
||||||
"coords",
|
|
||||||
"crossorigin",
|
|
||||||
"data",
|
|
||||||
"datetime",
|
|
||||||
"default",
|
|
||||||
"defer",
|
|
||||||
"dir",
|
|
||||||
"dirname",
|
|
||||||
"disabled",
|
|
||||||
"download",
|
|
||||||
"draggable",
|
|
||||||
"dropzone",
|
|
||||||
"enctype",
|
|
||||||
"for",
|
|
||||||
"form",
|
|
||||||
"formaction",
|
|
||||||
"formenctype",
|
|
||||||
"formmethod",
|
|
||||||
"formnovalidate",
|
|
||||||
"formtarget",
|
|
||||||
"headers",
|
|
||||||
"height",
|
|
||||||
"hidden",
|
|
||||||
"high",
|
|
||||||
"href",
|
|
||||||
"hreflang",
|
|
||||||
"http-equiv",
|
|
||||||
"icon",
|
|
||||||
"id",
|
|
||||||
"inputmode",
|
|
||||||
"integrity",
|
|
||||||
"is",
|
|
||||||
"ismap",
|
|
||||||
"itemid",
|
|
||||||
"itemprop",
|
|
||||||
"itemref",
|
|
||||||
"itemscope",
|
|
||||||
"itemtype",
|
|
||||||
"keytype",
|
|
||||||
"kind",
|
|
||||||
"label",
|
|
||||||
"lang",
|
|
||||||
"list",
|
|
||||||
"loop",
|
|
||||||
"low",
|
|
||||||
"manifest",
|
|
||||||
"max",
|
|
||||||
"maxlength",
|
|
||||||
"media",
|
|
||||||
"mediagroup",
|
|
||||||
"method",
|
|
||||||
"min",
|
|
||||||
"minlength",
|
|
||||||
"multiple",
|
|
||||||
"muted",
|
|
||||||
"name",
|
|
||||||
"nomodule",
|
|
||||||
"nonce",
|
|
||||||
"novalidate",
|
|
||||||
"open",
|
|
||||||
"optimum",
|
|
||||||
"pattern",
|
|
||||||
"ping",
|
|
||||||
"placeholder",
|
|
||||||
"playsinline",
|
|
||||||
"poster",
|
|
||||||
"preload",
|
|
||||||
"radiogroup",
|
|
||||||
"readonly",
|
|
||||||
"referrerpolicy",
|
|
||||||
"rel",
|
|
||||||
"required",
|
|
||||||
"reversed",
|
|
||||||
"rows",
|
|
||||||
"rowspan",
|
|
||||||
"sandbox",
|
|
||||||
"spellcheck",
|
|
||||||
"scope",
|
|
||||||
"scoped",
|
|
||||||
"seamless",
|
|
||||||
"selected",
|
|
||||||
"shape",
|
|
||||||
"size",
|
|
||||||
"sizes",
|
|
||||||
"sortable",
|
|
||||||
"sorted",
|
|
||||||
"slot",
|
|
||||||
"span",
|
|
||||||
"spellcheck",
|
|
||||||
"src",
|
|
||||||
"srcdoc",
|
|
||||||
"srclang",
|
|
||||||
"srcset",
|
|
||||||
"start",
|
|
||||||
"step",
|
|
||||||
"style",
|
|
||||||
"tabindex",
|
|
||||||
"target",
|
|
||||||
"title",
|
|
||||||
"translate",
|
|
||||||
"type",
|
|
||||||
"typemustmatch",
|
|
||||||
"updateviacache",
|
|
||||||
"usemap",
|
|
||||||
"value",
|
|
||||||
"width",
|
|
||||||
"workertype",
|
|
||||||
"wrap",
|
|
||||||
}
|
|
||||||
|
|
||||||
// "onautocomplete", "onautocompleteerror", "onmousewheel",
|
|
||||||
// "onshow" and "onsort" have been removed from the spec,
|
|
||||||
// but are kept here for backwards compatibility.
|
|
||||||
var eventHandlers = []string{
|
|
||||||
"onabort",
|
|
||||||
"onautocomplete",
|
|
||||||
"onautocompleteerror",
|
|
||||||
"onauxclick",
|
|
||||||
"onafterprint",
|
|
||||||
"onbeforeprint",
|
|
||||||
"onbeforeunload",
|
|
||||||
"onblur",
|
|
||||||
"oncancel",
|
|
||||||
"oncanplay",
|
|
||||||
"oncanplaythrough",
|
|
||||||
"onchange",
|
|
||||||
"onclick",
|
|
||||||
"onclose",
|
|
||||||
"oncontextmenu",
|
|
||||||
"oncopy",
|
|
||||||
"oncuechange",
|
|
||||||
"oncut",
|
|
||||||
"ondblclick",
|
|
||||||
"ondrag",
|
|
||||||
"ondragend",
|
|
||||||
"ondragenter",
|
|
||||||
"ondragexit",
|
|
||||||
"ondragleave",
|
|
||||||
"ondragover",
|
|
||||||
"ondragstart",
|
|
||||||
"ondrop",
|
|
||||||
"ondurationchange",
|
|
||||||
"onemptied",
|
|
||||||
"onended",
|
|
||||||
"onerror",
|
|
||||||
"onfocus",
|
|
||||||
"onhashchange",
|
|
||||||
"oninput",
|
|
||||||
"oninvalid",
|
|
||||||
"onkeydown",
|
|
||||||
"onkeypress",
|
|
||||||
"onkeyup",
|
|
||||||
"onlanguagechange",
|
|
||||||
"onload",
|
|
||||||
"onloadeddata",
|
|
||||||
"onloadedmetadata",
|
|
||||||
"onloadend",
|
|
||||||
"onloadstart",
|
|
||||||
"onmessage",
|
|
||||||
"onmessageerror",
|
|
||||||
"onmousedown",
|
|
||||||
"onmouseenter",
|
|
||||||
"onmouseleave",
|
|
||||||
"onmousemove",
|
|
||||||
"onmouseout",
|
|
||||||
"onmouseover",
|
|
||||||
"onmouseup",
|
|
||||||
"onmousewheel",
|
|
||||||
"onwheel",
|
|
||||||
"onoffline",
|
|
||||||
"ononline",
|
|
||||||
"onpagehide",
|
|
||||||
"onpageshow",
|
|
||||||
"onpaste",
|
|
||||||
"onpause",
|
|
||||||
"onplay",
|
|
||||||
"onplaying",
|
|
||||||
"onpopstate",
|
|
||||||
"onprogress",
|
|
||||||
"onratechange",
|
|
||||||
"onreset",
|
|
||||||
"onresize",
|
|
||||||
"onrejectionhandled",
|
|
||||||
"onscroll",
|
|
||||||
"onsecuritypolicyviolation",
|
|
||||||
"onseeked",
|
|
||||||
"onseeking",
|
|
||||||
"onselect",
|
|
||||||
"onshow",
|
|
||||||
"onsort",
|
|
||||||
"onstalled",
|
|
||||||
"onstorage",
|
|
||||||
"onsubmit",
|
|
||||||
"onsuspend",
|
|
||||||
"ontimeupdate",
|
|
||||||
"ontoggle",
|
|
||||||
"onunhandledrejection",
|
|
||||||
"onunload",
|
|
||||||
"onvolumechange",
|
|
||||||
"onwaiting",
|
|
||||||
}
|
|
||||||
|
|
||||||
// extra are ad-hoc values not covered by any of the lists above.
|
|
||||||
var extra = []string{
|
|
||||||
"acronym",
|
|
||||||
"align",
|
|
||||||
"annotation",
|
|
||||||
"annotation-xml",
|
|
||||||
"applet",
|
|
||||||
"basefont",
|
|
||||||
"bgsound",
|
|
||||||
"big",
|
|
||||||
"blink",
|
|
||||||
"center",
|
|
||||||
"color",
|
|
||||||
"desc",
|
|
||||||
"face",
|
|
||||||
"font",
|
|
||||||
"foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
|
|
||||||
"foreignobject",
|
|
||||||
"frame",
|
|
||||||
"frameset",
|
|
||||||
"image",
|
|
||||||
"isindex",
|
|
||||||
"listing",
|
|
||||||
"malignmark",
|
|
||||||
"marquee",
|
|
||||||
"math",
|
|
||||||
"mglyph",
|
|
||||||
"mi",
|
|
||||||
"mn",
|
|
||||||
"mo",
|
|
||||||
"ms",
|
|
||||||
"mtext",
|
|
||||||
"nobr",
|
|
||||||
"noembed",
|
|
||||||
"noframes",
|
|
||||||
"plaintext",
|
|
||||||
"prompt",
|
|
||||||
"public",
|
|
||||||
"rb",
|
|
||||||
"rtc",
|
|
||||||
"spacer",
|
|
||||||
"strike",
|
|
||||||
"svg",
|
|
||||||
"system",
|
|
||||||
"tt",
|
|
||||||
"xmp",
|
|
||||||
}
|
|
|
@ -1,61 +0,0 @@
|
||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go.
|
|
||||||
// This program must be run after mksyscall.go.
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
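// Illustrative sketch, not part of the original file: emitTrampoline is a
// hypothetical helper showing the exact stanza that main writes for each
// trampoline declaration it finds. "libc_read" is only an example name; for
// it, the emitted assembly would be
//
//	TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0
//		JMP	libc_read(SB)
func emitTrampoline(out *bytes.Buffer, fn string) {
	// Same format strings as the loop in main below.
	fmt.Fprintf(out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
	fmt.Fprintf(out, "\tJMP\t%s(SB)\n", fn)
}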
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
in1, err := ioutil.ReadFile("syscall_darwin.go")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("can't open syscall_darwin.go: %s", err)
|
|
||||||
}
|
|
||||||
arch := os.Args[1]
|
|
||||||
in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch))
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err)
|
|
||||||
}
|
|
||||||
in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch))
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err)
|
|
||||||
}
|
|
||||||
in := string(in1) + string(in2) + string(in3)
|
|
||||||
|
|
||||||
trampolines := map[string]bool{}
|
|
||||||
|
|
||||||
var out bytes.Buffer
|
|
||||||
|
|
||||||
fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " "))
|
|
||||||
fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n")
|
|
||||||
fmt.Fprintf(&out, "\n")
|
|
||||||
fmt.Fprintf(&out, "// +build go1.12\n")
|
|
||||||
fmt.Fprintf(&out, "\n")
|
|
||||||
fmt.Fprintf(&out, "#include \"textflag.h\"\n")
|
|
||||||
for _, line := range strings.Split(in, "\n") {
|
|
||||||
if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fn := line[5 : len(line)-13]
|
|
||||||
if !trampolines[fn] {
|
|
||||||
trampolines[fn] = true
|
|
||||||
fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
|
|
||||||
fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,122 +0,0 @@
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
// mkpost processes the output of cgo -godefs to
|
|
||||||
// modify the generated types. It is used to clean up
|
|
||||||
// the sys API in an architecture specific manner.
|
|
||||||
//
|
|
||||||
// mkpost is run after cgo -godefs; see README.md.
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"go/format"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
)
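// Illustrative sketch, not part of the original file: one of the rewrites
// this program performs, using the same regexp that main applies to Fsid
// and Sigset_t below. The input struct literal is a made-up example.
func exampleValRewrite() []byte {
	in := []byte("type Fsid struct {\n\tX__val [2]int32\n}")
	re := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`)
	// Result: "type Fsid struct {\n\tVal [2]int32\n}"
	return re.ReplaceAll(in, []byte("type $1 struct {${2}Val$4}"))
}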
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// Get the OS and architecture (using GOARCH_TARGET if it exists)
|
|
||||||
goos := os.Getenv("GOOS")
|
|
||||||
goarch := os.Getenv("GOARCH_TARGET")
|
|
||||||
if goarch == "" {
|
|
||||||
goarch = os.Getenv("GOARCH")
|
|
||||||
}
|
|
||||||
// Check that we are using the Docker-based build system if we should be.
|
|
||||||
if goos == "linux" {
|
|
||||||
if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
|
|
||||||
os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n")
|
|
||||||
os.Stderr.WriteString("See README.md\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := ioutil.ReadAll(os.Stdin)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if goos == "aix" {
|
|
||||||
// Replace type of Atim, Mtim and Ctim by Timespec in Stat_t
|
|
||||||
// to avoid having both StTimespec and Timespec.
|
|
||||||
sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`)
|
|
||||||
b = sttimespec.ReplaceAll(b, []byte("Timespec"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Intentionally export __val fields in Fsid and Sigset_t
|
|
||||||
valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`)
|
|
||||||
b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}"))
|
|
||||||
|
|
||||||
// Intentionally export __fds_bits field in FdSet
|
|
||||||
fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`)
|
|
||||||
b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}"))
|
|
||||||
|
|
||||||
// If we have empty Ptrace structs, we should delete them. Only s390x emits
|
|
||||||
// nonempty Ptrace structs.
|
|
||||||
ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`)
|
|
||||||
b = ptraceRexexp.ReplaceAll(b, nil)
|
|
||||||
|
|
||||||
// Replace the control_regs union with a blank identifier for now.
|
|
||||||
controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`)
|
|
||||||
b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64"))
|
|
||||||
|
|
||||||
// Remove fields that are added by glibc
|
|
||||||
// Note that this is unstable as the identifiers are private.
|
|
||||||
removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`)
|
|
||||||
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
|
|
||||||
|
|
||||||
// Convert [65]int8 to [65]byte in Utsname members to simplify
|
|
||||||
// conversion to string; see golang.org/issue/20753
|
|
||||||
convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`)
|
|
||||||
b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte"))
|
|
||||||
|
|
||||||
// Convert [1024]int8 to [1024]byte in Ptmget members
|
|
||||||
convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`)
|
|
||||||
b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte"))
|
|
||||||
|
|
||||||
// Remove spare fields (e.g. in Statx_t)
|
|
||||||
spareFieldsRegex := regexp.MustCompile(`X__spare\S*`)
|
|
||||||
b = spareFieldsRegex.ReplaceAll(b, []byte("_"))
|
|
||||||
|
|
||||||
// Remove cgo padding fields
|
|
||||||
removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`)
|
|
||||||
b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_"))
|
|
||||||
|
|
||||||
// Remove padding, hidden, or unused fields
|
|
||||||
removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`)
|
|
||||||
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
|
|
||||||
|
|
||||||
// Remove the first line of warning from cgo
|
|
||||||
b = b[bytes.IndexByte(b, '\n')+1:]
|
|
||||||
// Modify the command in the header to include:
|
|
||||||
// mkpost, our own warning, and a build tag.
|
|
||||||
replacement := fmt.Sprintf(`$1 | go run mkpost.go
|
|
||||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
|
||||||
|
|
||||||
// +build %s,%s`, goarch, goos)
|
|
||||||
cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`)
|
|
||||||
b = cgoCommandRegex.ReplaceAll(b, []byte(replacement))
|
|
||||||
|
|
||||||
// Rename Stat_t time fields
|
|
||||||
if goos == "freebsd" && goarch == "386" {
|
|
||||||
// Hide Stat_t.[AMCB]tim_ext fields
|
|
||||||
renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`)
|
|
||||||
b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_"))
|
|
||||||
}
|
|
||||||
renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`)
|
|
||||||
b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}"))
|
|
||||||
|
|
||||||
// gofmt
|
|
||||||
b, err = format.Source(b)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
os.Stdout.Write(b)
|
|
||||||
}
|
|
|
@ -1,407 +0,0 @@
|
||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
/*
|
|
||||||
This program reads a file containing function prototypes
|
|
||||||
(like syscall_darwin.go) and generates system call bodies.
|
|
||||||
The prototypes are marked by lines beginning with "//sys"
|
|
||||||
and read like func declarations if //sys is replaced by func, but:
|
|
||||||
* The parameter lists must give a name for each argument.
|
|
||||||
This includes return parameters.
|
|
||||||
* The parameter lists must give a type for each argument:
|
|
||||||
the (x, y, z int) shorthand is not allowed.
|
|
||||||
* If the return parameter is an error number, it must be named errno.
|
|
||||||
|
|
||||||
A line beginning with //sysnb is like //sys, except that the
|
|
||||||
goroutine will not be suspended during the execution of the system
|
|
||||||
call. This must only be used for system calls which can never
|
|
||||||
block, as otherwise the system call could cause all goroutines to
|
|
||||||
hang.
|
|
||||||
*/
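// For illustration only (not part of the original file), a prototype such as
//
//	//sys	Mkdir(path string, mode uint32) (err error)
//
// is turned into a wrapper of roughly this shape (error handling varies by
// GOOS and by the libc/raw-syscall mode selected above):
//
//	func Mkdir(path string, mode uint32) (err error) {
//		var _p0 *byte
//		_p0, err = BytePtrFromString(path)
//		if err != nil {
//			return
//		}
//		_, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
//		if e1 != 0 {
//			err = errnoErr(e1)
//		}
//		return
//	}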
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
b32 = flag.Bool("b32", false, "32bit big-endian")
|
|
||||||
l32 = flag.Bool("l32", false, "32bit little-endian")
|
|
||||||
plan9 = flag.Bool("plan9", false, "plan9")
|
|
||||||
openbsd = flag.Bool("openbsd", false, "openbsd")
|
|
||||||
netbsd = flag.Bool("netbsd", false, "netbsd")
|
|
||||||
dragonfly = flag.Bool("dragonfly", false, "dragonfly")
|
|
||||||
arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair
|
|
||||||
tags = flag.String("tags", "", "build tags")
|
|
||||||
filename = flag.String("output", "", "output file name (standard output if omitted)")
|
|
||||||
)
|
|
||||||
|
|
||||||
// cmdLine returns this program's command-line arguments
|
|
||||||
func cmdLine() string {
|
|
||||||
return "go run mksyscall.go " + strings.Join(os.Args[1:], " ")
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildTags returns build tags
|
|
||||||
func buildTags() string {
|
|
||||||
return *tags
|
|
||||||
}
|
|
||||||
|
|
||||||
// Param is a function parameter
|
|
||||||
type Param struct {
|
|
||||||
Name string
|
|
||||||
Type string
|
|
||||||
}
|
|
||||||
|
|
||||||
// usage prints the program usage
|
|
||||||
func usage() {
|
|
||||||
fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseParamList parses parameter list and returns a slice of parameters
|
|
||||||
func parseParamList(list string) []string {
|
|
||||||
list = strings.TrimSpace(list)
|
|
||||||
if list == "" {
|
|
||||||
return []string{}
|
|
||||||
}
|
|
||||||
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseParam splits a parameter into name and type
|
|
||||||
func parseParam(p string) Param {
|
|
||||||
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
|
|
||||||
if ps == nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
return Param{ps[1], ps[2]}
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// Get the OS and architecture (using GOARCH_TARGET if it exists)
|
|
||||||
goos := os.Getenv("GOOS")
|
|
||||||
if goos == "" {
|
|
||||||
fmt.Fprintln(os.Stderr, "GOOS not defined in environment")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
goarch := os.Getenv("GOARCH_TARGET")
|
|
||||||
if goarch == "" {
|
|
||||||
goarch = os.Getenv("GOARCH")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check that we are using the Docker-based build system if we should be.
|
|
||||||
if goos == "linux" {
|
|
||||||
if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
|
|
||||||
fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n")
|
|
||||||
fmt.Fprintf(os.Stderr, "See README.md\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
flag.Usage = usage
|
|
||||||
flag.Parse()
|
|
||||||
if len(flag.Args()) <= 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
|
|
||||||
usage()
|
|
||||||
}
|
|
||||||
|
|
||||||
endianness := ""
|
|
||||||
if *b32 {
|
|
||||||
endianness = "big-endian"
|
|
||||||
} else if *l32 {
|
|
||||||
endianness = "little-endian"
|
|
||||||
}
|
|
||||||
|
|
||||||
libc := false
|
|
||||||
if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") {
|
|
||||||
libc = true
|
|
||||||
}
|
|
||||||
trampolines := map[string]bool{}
|
|
||||||
|
|
||||||
text := ""
|
|
||||||
for _, path := range flag.Args() {
|
|
||||||
file, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
s := bufio.NewScanner(file)
|
|
||||||
for s.Scan() {
|
|
||||||
t := s.Text()
|
|
||||||
t = strings.TrimSpace(t)
|
|
||||||
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
|
|
||||||
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
|
|
||||||
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Line must be of the form
|
|
||||||
// func Open(path string, mode int, perm int) (fd int, errno error)
|
|
||||||
// Split into name, in params, out params.
|
|
||||||
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t)
|
|
||||||
if f == nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
funct, inps, outps, sysname := f[2], f[3], f[4], f[5]
|
|
||||||
|
|
||||||
// ClockGettime doesn't have a syscall number on Darwin, so only generate libc wrappers.
|
|
||||||
if goos == "darwin" && !libc && funct == "ClockGettime" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Split argument lists on comma.
|
|
||||||
in := parseParamList(inps)
|
|
||||||
out := parseParamList(outps)
|
|
||||||
|
|
||||||
// Try in vain to keep people from editing this file.
|
|
||||||
// The theory is that they jump into the middle of the file
|
|
||||||
// without reading the header.
|
|
||||||
text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
|
|
||||||
|
|
||||||
// Go function header.
|
|
||||||
outDecl := ""
|
|
||||||
if len(out) > 0 {
|
|
||||||
outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", "))
|
|
||||||
}
|
|
||||||
text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl)
|
|
||||||
|
|
||||||
// Check if err return available
|
|
||||||
errvar := ""
|
|
||||||
for _, param := range out {
|
|
||||||
p := parseParam(param)
|
|
||||||
if p.Type == "error" {
|
|
||||||
errvar = p.Name
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prepare arguments to Syscall.
|
|
||||||
var args []string
|
|
||||||
n := 0
|
|
||||||
for _, param := range in {
|
|
||||||
p := parseParam(param)
|
|
||||||
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
|
|
||||||
args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
|
|
||||||
} else if p.Type == "string" && errvar != "" {
|
|
||||||
text += fmt.Sprintf("\tvar _p%d *byte\n", n)
|
|
||||||
text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name)
|
|
||||||
text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
|
|
||||||
n++
|
|
||||||
} else if p.Type == "string" {
|
|
||||||
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
|
|
||||||
text += fmt.Sprintf("\tvar _p%d *byte\n", n)
|
|
||||||
text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name)
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
|
|
||||||
n++
|
|
||||||
} else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
|
|
||||||
// Convert slice into pointer, length.
|
|
||||||
// Have to be careful not to take address of &a[0] if len == 0:
|
|
||||||
// pass dummy pointer in that case.
|
|
||||||
// Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
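// (Illustrative, not in the original file: for an example parameter named
// "buf" of type []byte, the three Sprintf calls below emit
//
//	var _p0 unsafe.Pointer
//	if len(buf) > 0 {
//		_p0 = unsafe.Pointer(&buf[0])
//	} else {
//		_p0 = unsafe.Pointer(&_zero)
//	}
//
// and append uintptr(_p0) and uintptr(len(buf)) as the two syscall args.)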
|
|
||||||
text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n)
|
|
||||||
text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name)
|
|
||||||
text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n)
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
|
|
||||||
n++
|
|
||||||
} else if p.Type == "int64" && (*openbsd || *netbsd) {
|
|
||||||
args = append(args, "0")
|
|
||||||
if endianness == "big-endian" {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
} else if endianness == "little-endian" {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
|
|
||||||
} else {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
}
|
|
||||||
} else if p.Type == "int64" && *dragonfly {
|
|
||||||
if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil {
|
|
||||||
args = append(args, "0")
|
|
||||||
}
|
|
||||||
if endianness == "big-endian" {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
} else if endianness == "little-endian" {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
|
|
||||||
} else {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
}
|
|
||||||
} else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" {
|
|
||||||
if len(args)%2 == 1 && *arm {
|
|
||||||
// arm abi specifies 64-bit argument uses
|
|
||||||
// (even, odd) pair
|
|
||||||
args = append(args, "0")
|
|
||||||
}
|
|
||||||
if endianness == "big-endian" {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
} else {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine which form to use; pad args with zeros.
|
|
||||||
asm := "Syscall"
|
|
||||||
if nonblock != nil {
|
|
||||||
if errvar == "" && goos == "linux" {
|
|
||||||
asm = "RawSyscallNoError"
|
|
||||||
} else {
|
|
||||||
asm = "RawSyscall"
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if errvar == "" && goos == "linux" {
|
|
||||||
asm = "SyscallNoError"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(args) <= 3 {
|
|
||||||
for len(args) < 3 {
|
|
||||||
args = append(args, "0")
|
|
||||||
}
|
|
||||||
} else if len(args) <= 6 {
|
|
||||||
asm += "6"
|
|
||||||
for len(args) < 6 {
|
|
||||||
args = append(args, "0")
|
|
||||||
}
|
|
||||||
} else if len(args) <= 9 {
|
|
||||||
asm += "9"
|
|
||||||
for len(args) < 9 {
|
|
||||||
args = append(args, "0")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct)
|
|
||||||
}
|
|
||||||
|
|
||||||
// System call number.
|
|
||||||
if sysname == "" {
|
|
||||||
sysname = "SYS_" + funct
|
|
||||||
sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
|
|
||||||
sysname = strings.ToUpper(sysname)
|
|
||||||
}
|
|
||||||
|
|
||||||
var libcFn string
|
|
||||||
if libc {
|
|
||||||
asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call
|
|
||||||
sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_
|
|
||||||
sysname = strings.ToLower(sysname) // lowercase
|
|
||||||
if sysname == "getdirentries64" {
|
|
||||||
// Special case - libSystem name and
|
|
||||||
// raw syscall name don't match.
|
|
||||||
sysname = "__getdirentries64"
|
|
||||||
}
|
|
||||||
libcFn = sysname
|
|
||||||
sysname = "funcPC(libc_" + sysname + "_trampoline)"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Actual call.
|
|
||||||
arglist := strings.Join(args, ", ")
|
|
||||||
call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist)
|
|
||||||
|
|
||||||
// Assign return values.
|
|
||||||
body := ""
|
|
||||||
ret := []string{"_", "_", "_"}
|
|
||||||
doErrno := false
|
|
||||||
for i := 0; i < len(out); i++ {
|
|
||||||
p := parseParam(out[i])
|
|
||||||
reg := ""
|
|
||||||
if p.Name == "err" && !*plan9 {
|
|
||||||
reg = "e1"
|
|
||||||
ret[2] = reg
|
|
||||||
doErrno = true
|
|
||||||
} else if p.Name == "err" && *plan9 {
|
|
||||||
ret[0] = "r0"
|
|
||||||
ret[2] = "e1"
|
|
||||||
break
|
|
||||||
} else {
|
|
||||||
reg = fmt.Sprintf("r%d", i)
|
|
||||||
ret[i] = reg
|
|
||||||
}
|
|
||||||
if p.Type == "bool" {
|
|
||||||
reg = fmt.Sprintf("%s != 0", reg)
|
|
||||||
}
|
|
||||||
if p.Type == "int64" && endianness != "" {
|
|
||||||
// 64-bit number in r1:r0 or r0:r1.
|
|
||||||
if i+2 > len(out) {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct)
|
|
||||||
}
|
|
||||||
if endianness == "big-endian" {
|
|
||||||
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
|
|
||||||
} else {
|
|
||||||
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
|
|
||||||
}
|
|
||||||
ret[i] = fmt.Sprintf("r%d", i)
|
|
||||||
ret[i+1] = fmt.Sprintf("r%d", i+1)
|
|
||||||
}
|
|
||||||
if reg != "e1" || *plan9 {
|
|
||||||
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
|
|
||||||
text += fmt.Sprintf("\t%s\n", call)
|
|
||||||
} else {
|
|
||||||
if errvar == "" && goos == "linux" {
|
|
||||||
// raw syscall without error on Linux, see golang.org/issue/22924
|
|
||||||
text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call)
|
|
||||||
} else {
|
|
||||||
text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
text += body
|
|
||||||
|
|
||||||
if *plan9 && ret[2] == "e1" {
|
|
||||||
text += "\tif int32(r0) == -1 {\n"
|
|
||||||
text += "\t\terr = e1\n"
|
|
||||||
text += "\t}\n"
|
|
||||||
} else if doErrno {
|
|
||||||
text += "\tif e1 != 0 {\n"
|
|
||||||
text += "\t\terr = errnoErr(e1)\n"
|
|
||||||
text += "\t}\n"
|
|
||||||
}
|
|
||||||
text += "\treturn\n"
|
|
||||||
text += "}\n\n"
|
|
||||||
|
|
||||||
if libc && !trampolines[libcFn] {
|
|
||||||
// some system calls share a trampoline, like read and readlen.
|
|
||||||
trampolines[libcFn] = true
|
|
||||||
// Declare assembly trampoline.
|
|
||||||
text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn)
|
|
||||||
// Assembly trampoline calls the libc_* function, which this magic
|
|
||||||
// redirects to use the function from libSystem.
|
|
||||||
text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn)
|
|
||||||
text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn)
|
|
||||||
text += "\n"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := s.Err(); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
file.Close()
|
|
||||||
}
|
|
||||||
fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
|
|
||||||
}
|
|
||||||
|
|
||||||
const srcTemplate = `// %s
|
|
||||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
|
||||||
|
|
||||||
// +build %s
|
|
||||||
|
|
||||||
package unix
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
var _ syscall.Errno
|
|
||||||
|
|
||||||
%s
|
|
||||||
`
|
|
|
@ -1,415 +0,0 @@
|
||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
/*
|
|
||||||
This program reads a file containing function prototypes
|
|
||||||
(like syscall_aix.go) and generates system call bodies.
|
|
||||||
The prototypes are marked by lines beginning with "//sys"
|
|
||||||
and read like func declarations if //sys is replaced by func, but:
|
|
||||||
* The parameter lists must give a name for each argument.
|
|
||||||
This includes return parameters.
|
|
||||||
* The parameter lists must give a type for each argument:
|
|
||||||
the (x, y, z int) shorthand is not allowed.
|
|
||||||
* If the return parameter is an error number, it must be named err.
|
|
||||||
* If the go func name needs to be different from its libc name,
* or the function is not in libc, the name can be specified
* at the end, after the "=" sign, like
|
|
||||||
//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
|
|
||||||
*/
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
b32 = flag.Bool("b32", false, "32bit big-endian")
|
|
||||||
l32 = flag.Bool("l32", false, "32bit little-endian")
|
|
||||||
aix = flag.Bool("aix", false, "aix")
|
|
||||||
tags = flag.String("tags", "", "build tags")
|
|
||||||
)
|
|
||||||
|
|
||||||
// cmdLine returns this program's command-line arguments
|
|
||||||
func cmdLine() string {
|
|
||||||
return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ")
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildTags returns build tags
|
|
||||||
func buildTags() string {
|
|
||||||
return *tags
|
|
||||||
}
|
|
||||||
|
|
||||||
// Param is a function parameter
|
|
||||||
type Param struct {
|
|
||||||
Name string
|
|
||||||
Type string
|
|
||||||
}
|
|
||||||
|
|
||||||
// usage prints the program usage
|
|
||||||
func usage() {
|
|
||||||
fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseParamList parses parameter list and returns a slice of parameters
|
|
||||||
func parseParamList(list string) []string {
|
|
||||||
list = strings.TrimSpace(list)
|
|
||||||
if list == "" {
|
|
||||||
return []string{}
|
|
||||||
}
|
|
||||||
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseParam splits a parameter into name and type
|
|
||||||
func parseParam(p string) Param {
|
|
||||||
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
|
|
||||||
if ps == nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
return Param{ps[1], ps[2]}
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Usage = usage
|
|
||||||
flag.Parse()
|
|
||||||
if len(flag.Args()) <= 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
|
|
||||||
usage()
|
|
||||||
}
|
|
||||||
|
|
||||||
endianness := ""
|
|
||||||
if *b32 {
|
|
||||||
endianness = "big-endian"
|
|
||||||
} else if *l32 {
|
|
||||||
endianness = "little-endian"
|
|
||||||
}
|
|
||||||
|
|
||||||
pack := ""
|
|
||||||
text := ""
|
|
||||||
cExtern := "/*\n#include <stdint.h>\n#include <stddef.h>\n"
|
|
||||||
for _, path := range flag.Args() {
|
|
||||||
file, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
s := bufio.NewScanner(file)
|
|
||||||
for s.Scan() {
|
|
||||||
t := s.Text()
|
|
||||||
t = strings.TrimSpace(t)
|
|
||||||
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
|
|
||||||
if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
|
|
||||||
pack = p[1]
|
|
||||||
}
|
|
||||||
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
|
|
||||||
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Line must be of the form
|
|
||||||
// func Open(path string, mode int, perm int) (fd int, err error)
|
|
||||||
// Split into name, in params, out params.
|
|
||||||
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
|
|
||||||
if f == nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
|
|
||||||
|
|
||||||
// Split argument lists on comma.
|
|
||||||
in := parseParamList(inps)
|
|
||||||
out := parseParamList(outps)
|
|
||||||
|
|
||||||
inps = strings.Join(in, ", ")
|
|
||||||
outps = strings.Join(out, ", ")
|
|
||||||
|
|
||||||
// Try in vain to keep people from editing this file.
|
|
||||||
// The theory is that they jump into the middle of the file
|
|
||||||
// without reading the header.
|
|
||||||
text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
|
|
||||||
|
|
||||||
// Check if value return, err return available
|
|
||||||
errvar := ""
|
|
||||||
retvar := ""
|
|
||||||
rettype := ""
|
|
||||||
for _, param := range out {
|
|
||||||
p := parseParam(param)
|
|
||||||
if p.Type == "error" {
|
|
||||||
errvar = p.Name
|
|
||||||
} else {
|
|
||||||
retvar = p.Name
|
|
||||||
rettype = p.Type
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// System call name.
|
|
||||||
if sysname == "" {
|
|
||||||
sysname = funct
|
|
||||||
}
|
|
||||||
sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
|
|
||||||
sysname = strings.ToLower(sysname) // All libc functions are lowercase.
|
|
||||||
|
|
||||||
cRettype := ""
|
|
||||||
if rettype == "unsafe.Pointer" {
|
|
||||||
cRettype = "uintptr_t"
|
|
||||||
} else if rettype == "uintptr" {
|
|
||||||
cRettype = "uintptr_t"
|
|
||||||
} else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
|
|
||||||
cRettype = "uintptr_t"
|
|
||||||
} else if rettype == "int" {
|
|
||||||
cRettype = "int"
|
|
||||||
} else if rettype == "int32" {
|
|
||||||
cRettype = "int"
|
|
||||||
} else if rettype == "int64" {
|
|
||||||
cRettype = "long long"
|
|
||||||
} else if rettype == "uint32" {
|
|
||||||
cRettype = "unsigned int"
|
|
||||||
} else if rettype == "uint64" {
|
|
||||||
cRettype = "unsigned long long"
|
|
||||||
} else {
|
|
||||||
cRettype = "int"
|
|
||||||
}
|
|
||||||
if sysname == "exit" {
|
|
||||||
cRettype = "void"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Change p.Types to c
|
|
||||||
var cIn []string
|
|
||||||
for _, param := range in {
|
|
||||||
p := parseParam(param)
|
|
||||||
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
|
|
||||||
cIn = append(cIn, "uintptr_t")
|
|
||||||
} else if p.Type == "string" {
|
|
||||||
cIn = append(cIn, "uintptr_t")
|
|
||||||
} else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
|
|
||||||
cIn = append(cIn, "uintptr_t", "size_t")
|
|
||||||
} else if p.Type == "unsafe.Pointer" {
|
|
||||||
cIn = append(cIn, "uintptr_t")
|
|
||||||
} else if p.Type == "uintptr" {
|
|
||||||
cIn = append(cIn, "uintptr_t")
|
|
||||||
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
|
|
||||||
cIn = append(cIn, "uintptr_t")
|
|
||||||
} else if p.Type == "int" {
|
|
||||||
cIn = append(cIn, "int")
|
|
||||||
} else if p.Type == "int32" {
|
|
||||||
cIn = append(cIn, "int")
|
|
||||||
} else if p.Type == "int64" {
|
|
||||||
cIn = append(cIn, "long long")
|
|
||||||
} else if p.Type == "uint32" {
|
|
||||||
cIn = append(cIn, "unsigned int")
|
|
||||||
} else if p.Type == "uint64" {
|
|
||||||
cIn = append(cIn, "unsigned long long")
|
|
||||||
} else {
|
|
||||||
cIn = append(cIn, "int")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" {
|
|
||||||
if sysname == "select" {
|
|
||||||
// select is a Go keyword, so its name is changed to c_select.
|
|
||||||
cExtern += "#define c_select select\n"
|
|
||||||
}
|
|
||||||
// Imports of system calls from libc
|
|
||||||
cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
|
|
||||||
cIn := strings.Join(cIn, ", ")
|
|
||||||
cExtern += fmt.Sprintf("(%s);\n", cIn)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Shared object file name.
|
|
||||||
if *aix {
|
|
||||||
if modname == "" {
|
|
||||||
modname = "libc.a/shr_64.o"
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
strconvfunc := "C.CString"
|
|
||||||
|
|
||||||
// Go function header.
|
|
||||||
if outps != "" {
|
|
||||||
outps = fmt.Sprintf(" (%s)", outps)
|
|
||||||
}
|
|
||||||
if text != "" {
|
|
||||||
text += "\n"
|
|
||||||
}
|
|
||||||
|
|
||||||
text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
|
|
||||||
|
|
||||||
// Prepare arguments to Syscall.
|
|
||||||
var args []string
|
|
||||||
n := 0
|
|
||||||
argN := 0
|
|
||||||
for _, param := range in {
|
|
||||||
p := parseParam(param)
|
|
||||||
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
|
|
||||||
args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))")
|
|
||||||
} else if p.Type == "string" && errvar != "" {
|
|
||||||
text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
|
|
||||||
args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
|
|
||||||
n++
|
|
||||||
} else if p.Type == "string" {
|
|
||||||
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
|
|
||||||
text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
|
|
||||||
args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
|
|
||||||
n++
|
|
||||||
} else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
|
|
||||||
// Convert slice into pointer, length.
|
|
||||||
// Have to be careful not to take address of &a[0] if len == 0:
|
|
||||||
// pass nil in that case.
|
|
||||||
text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
|
|
||||||
text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
|
|
||||||
args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n))
|
|
||||||
n++
|
|
||||||
text += fmt.Sprintf("\tvar _p%d int\n", n)
|
|
||||||
text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name)
|
|
||||||
args = append(args, fmt.Sprintf("C.size_t(_p%d)", n))
|
|
||||||
n++
|
|
||||||
} else if p.Type == "int64" && endianness != "" {
|
|
||||||
if endianness == "big-endian" {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
} else {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
|
|
||||||
}
|
|
||||||
n++
|
|
||||||
} else if p.Type == "bool" {
|
|
||||||
text += fmt.Sprintf("\tvar _p%d uint32\n", n)
|
|
||||||
text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
|
|
||||||
args = append(args, fmt.Sprintf("_p%d", n))
|
|
||||||
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
|
|
||||||
args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
|
|
||||||
} else if p.Type == "unsafe.Pointer" {
|
|
||||||
args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
|
|
||||||
} else if p.Type == "int" {
|
|
||||||
if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) {
|
|
||||||
args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name))
|
|
||||||
} else if argN == 0 && funct == "fcntl" {
|
|
||||||
args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
|
||||||
} else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) {
|
|
||||||
args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
|
||||||
} else {
|
|
||||||
args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
|
|
||||||
}
|
|
||||||
} else if p.Type == "int32" {
|
|
||||||
args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
|
|
||||||
} else if p.Type == "int64" {
|
|
||||||
args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name))
|
|
||||||
} else if p.Type == "uint32" {
|
|
||||||
args = append(args, fmt.Sprintf("C.uint(%s)", p.Name))
|
|
||||||
} else if p.Type == "uint64" {
|
|
||||||
args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name))
|
|
||||||
} else if p.Type == "uintptr" {
|
|
||||||
args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
|
||||||
} else {
|
|
||||||
args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
|
|
||||||
}
|
|
||||||
argN++
|
|
||||||
}
|
|
||||||
|
|
||||||
// Actual call.
|
|
||||||
arglist := strings.Join(args, ", ")
|
|
||||||
call := ""
|
|
||||||
if sysname == "exit" {
|
|
||||||
if errvar != "" {
|
|
||||||
call += "er :="
|
|
||||||
} else {
|
|
||||||
call += ""
|
|
||||||
}
|
|
||||||
} else if errvar != "" {
|
|
||||||
call += "r0,er :="
|
|
||||||
} else if retvar != "" {
|
|
||||||
call += "r0,_ :="
|
|
||||||
} else {
|
|
||||||
call += ""
|
|
||||||
}
|
|
||||||
if sysname == "select" {
|
|
||||||
// select is a Go keyword, so its name is changed to c_select.
|
|
||||||
call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist)
|
|
||||||
} else {
|
|
||||||
call += fmt.Sprintf("C.%s(%s)", sysname, arglist)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Assign return values.
|
|
||||||
body := ""
|
|
||||||
for i := 0; i < len(out); i++ {
|
|
||||||
p := parseParam(out[i])
|
|
||||||
reg := ""
|
|
||||||
if p.Name == "err" {
|
|
||||||
reg = "e1"
|
|
||||||
} else {
|
|
||||||
reg = "r0"
|
|
||||||
}
|
|
||||||
if reg != "e1" {
|
|
||||||
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// verify return
|
|
||||||
if sysname != "exit" && errvar != "" {
|
|
||||||
if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil {
|
|
||||||
body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n"
|
|
||||||
body += fmt.Sprintf("\t\t%s = er\n", errvar)
|
|
||||||
body += "\t}\n"
|
|
||||||
} else {
|
|
||||||
body += "\tif (r0 ==-1 && er != nil) {\n"
|
|
||||||
body += fmt.Sprintf("\t\t%s = er\n", errvar)
|
|
||||||
body += "\t}\n"
|
|
||||||
}
|
|
||||||
} else if errvar != "" {
|
|
||||||
body += "\tif (er != nil) {\n"
|
|
||||||
body += fmt.Sprintf("\t\t%s = er\n", errvar)
|
|
||||||
body += "\t}\n"
|
|
||||||
}
|
|
||||||
|
|
||||||
text += fmt.Sprintf("\t%s\n", call)
|
|
||||||
text += body
|
|
||||||
|
|
||||||
text += "\treturn\n"
|
|
||||||
text += "}\n"
|
|
||||||
}
|
|
||||||
if err := s.Err(); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
file.Close()
|
|
||||||
}
|
|
||||||
imp := ""
|
|
||||||
if pack != "unix" {
|
|
||||||
imp = "import \"golang.org/x/sys/unix\"\n"
|
|
||||||
|
|
||||||
}
|
|
||||||
fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text)
|
|
||||||
}
|
|
||||||
|
|
||||||
const srcTemplate = `// %s
|
|
||||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
|
||||||
|
|
||||||
// +build %s
|
|
||||||
|
|
||||||
package %s
|
|
||||||
|
|
||||||
|
|
||||||
%s
|
|
||||||
*/
|
|
||||||
import "C"
|
|
||||||
import (
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
%s
|
|
||||||
|
|
||||||
%s
|
|
||||||
`
|
|
|
@ -1,614 +0,0 @@
|
||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
/*
|
|
||||||
This program reads a file containing function prototypes
|
|
||||||
(like syscall_aix.go) and generates system call bodies.
|
|
||||||
The prototypes are marked by lines beginning with "//sys"
|
|
||||||
and read like func declarations if //sys is replaced by func, but:
|
|
||||||
* The parameter lists must give a name for each argument.
|
|
||||||
This includes return parameters.
|
|
||||||
* The parameter lists must give a type for each argument:
|
|
||||||
the (x, y, z int) shorthand is not allowed.
|
|
||||||
* If the return parameter is an error number, it must be named err.
|
|
||||||
* If the go func name needs to be different from its libc name,
* or the function is not in libc, the name can be specified
* at the end, after the "=" sign, like
|
|
||||||
//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
|
|
||||||
|
|
||||||
|
|
||||||
This program will generate three files and handle both gc and gccgo implementation:
|
|
||||||
- zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation)
|
|
||||||
- zsyscall_aix_ppc64_gc.go: gc part with //go:cgo_import_dynamic and a call to syscall6
|
|
||||||
- zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type.
|
|
||||||
|
|
||||||
The generated code looks like this
|
|
||||||
|
|
||||||
zsyscall_aix_ppc64.go
|
|
||||||
func asyscall(...) (n int, err error) {
|
|
||||||
// Pointer Creation
|
|
||||||
r1, e1 := callasyscall(...)
|
|
||||||
// Type Conversion
|
|
||||||
// Error Handler
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
zsyscall_aix_ppc64_gc.go
|
|
||||||
//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o"
|
|
||||||
//go:linkname libc_asyscall libc_asyscall
|
|
||||||
var asyscall syscallFunc
|
|
||||||
|
|
||||||
func callasyscall(...) (r1 uintptr, e1 Errno) {
|
|
||||||
r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... )
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
zsyscall_aix_ppc64_gccgo.go
|
|
||||||
|
|
||||||
// int asyscall(...)
|
|
||||||
|
|
||||||
import "C"
|
|
||||||
|
|
||||||
func callasyscall(...) (r1 uintptr, e1 Errno) {
|
|
||||||
r1 = uintptr(C.asyscall(...))
|
|
||||||
e1 = syscall.GetErrno()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
b32 = flag.Bool("b32", false, "32bit big-endian")
|
|
||||||
l32 = flag.Bool("l32", false, "32bit little-endian")
|
|
||||||
aix = flag.Bool("aix", false, "aix")
|
|
||||||
tags = flag.String("tags", "", "build tags")
|
|
||||||
)
|
|
||||||
|
|
||||||
// cmdLine returns this program's command-line arguments
|
|
||||||
func cmdLine() string {
|
|
||||||
return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ")
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildTags returns build tags
|
|
||||||
func buildTags() string {
|
|
||||||
return *tags
|
|
||||||
}
|
|
||||||
|
|
||||||
// Param is a function parameter
|
|
||||||
type Param struct {
|
|
||||||
Name string
|
|
||||||
Type string
|
|
||||||
}
|
|
||||||
|
|
||||||
// usage prints the program usage
|
|
||||||
func usage() {
|
|
||||||
fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseParamList parses parameter list and returns a slice of parameters
|
|
||||||
func parseParamList(list string) []string {
|
|
||||||
list = strings.TrimSpace(list)
|
|
||||||
if list == "" {
|
|
||||||
return []string{}
|
|
||||||
}
|
|
||||||
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseParam splits a parameter into name and type
|
|
||||||
func parseParam(p string) Param {
|
|
||||||
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
|
|
||||||
if ps == nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
return Param{ps[1], ps[2]}
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Usage = usage
|
|
||||||
flag.Parse()
|
|
||||||
if len(flag.Args()) <= 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
|
|
||||||
usage()
|
|
||||||
}
|
|
||||||
|
|
||||||
endianness := ""
|
|
||||||
if *b32 {
|
|
||||||
endianness = "big-endian"
|
|
||||||
} else if *l32 {
|
|
||||||
endianness = "little-endian"
|
|
||||||
}
|
|
||||||
|
|
||||||
pack := ""
|
|
||||||
// GCCGO
|
|
||||||
textgccgo := ""
|
|
||||||
cExtern := "/*\n#include <stdint.h>\n"
|
|
||||||
// GC
|
|
||||||
textgc := ""
|
|
||||||
dynimports := ""
|
|
||||||
linknames := ""
|
|
||||||
var vars []string
|
|
||||||
// COMMON
|
|
||||||
textcommon := ""
|
|
||||||
for _, path := range flag.Args() {
|
|
||||||
file, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
s := bufio.NewScanner(file)
|
|
||||||
for s.Scan() {
|
|
||||||
t := s.Text()
|
|
||||||
t = strings.TrimSpace(t)
|
|
||||||
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
|
|
||||||
if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
|
|
||||||
pack = p[1]
|
|
||||||
}
|
|
||||||
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
|
|
||||||
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Line must be of the form
|
|
||||||
// func Open(path string, mode int, perm int) (fd int, err error)
|
|
||||||
// Split into name, in params, out params.
|
|
||||||
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
|
|
||||||
if f == nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
|
|
||||||
|
|
||||||
// Split argument lists on comma.
|
|
||||||
in := parseParamList(inps)
|
|
||||||
out := parseParamList(outps)
|
|
||||||
|
|
||||||
inps = strings.Join(in, ", ")
|
|
||||||
outps = strings.Join(out, ", ")
|
|
||||||
|
|
||||||
if sysname == "" {
|
|
||||||
sysname = funct
|
|
||||||
}
|
|
||||||
|
|
||||||
onlyCommon := false
|
|
||||||
if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" {
|
|
||||||
// This function calls another syscall which is already implemented.
|
|
||||||
// Therefore, the gc and gccgo part must not be generated.
|
|
||||||
onlyCommon = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try in vain to keep people from editing this file.
|
|
||||||
// The theory is that they jump into the middle of the file
|
|
||||||
// without reading the header.
|
|
||||||
|
|
||||||
textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
|
|
||||||
if !onlyCommon {
|
|
||||||
textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
|
|
||||||
textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if value return, err return available
|
|
||||||
errvar := ""
|
|
||||||
rettype := ""
|
|
||||||
for _, param := range out {
|
|
||||||
p := parseParam(param)
|
|
||||||
if p.Type == "error" {
|
|
||||||
errvar = p.Name
|
|
||||||
} else {
|
|
||||||
rettype = p.Type
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
|
|
||||||
sysname = strings.ToLower(sysname) // All libc functions are lowercase.
|
|
||||||
|
|
||||||
// GCCGO Prototype return type
|
|
||||||
cRettype := ""
|
|
||||||
if rettype == "unsafe.Pointer" {
|
|
||||||
cRettype = "uintptr_t"
|
|
||||||
} else if rettype == "uintptr" {
|
|
||||||
cRettype = "uintptr_t"
|
|
||||||
} else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
|
|
||||||
cRettype = "uintptr_t"
|
|
||||||
} else if rettype == "int" {
|
|
||||||
cRettype = "int"
|
|
||||||
} else if rettype == "int32" {
|
|
||||||
cRettype = "int"
|
|
||||||
} else if rettype == "int64" {
|
|
||||||
cRettype = "long long"
|
|
||||||
} else if rettype == "uint32" {
|
|
||||||
cRettype = "unsigned int"
|
|
||||||
} else if rettype == "uint64" {
|
|
||||||
cRettype = "unsigned long long"
|
|
||||||
} else {
|
|
||||||
cRettype = "int"
|
|
||||||
}
|
|
||||||
if sysname == "exit" {
|
|
||||||
cRettype = "void"
|
|
||||||
}
|
|
||||||
|
|
||||||
// GCCGO Prototype arguments type
|
|
||||||
var cIn []string
|
|
||||||
for i, param := range in {
|
|
||||||
p := parseParam(param)
|
|
||||||
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
|
|
||||||
cIn = append(cIn, "uintptr_t")
|
|
||||||
} else if p.Type == "string" {
|
|
||||||
cIn = append(cIn, "uintptr_t")
|
|
||||||
} else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
|
|
||||||
cIn = append(cIn, "uintptr_t", "size_t")
|
|
||||||
} else if p.Type == "unsafe.Pointer" {
|
|
||||||
cIn = append(cIn, "uintptr_t")
|
|
||||||
} else if p.Type == "uintptr" {
|
|
||||||
cIn = append(cIn, "uintptr_t")
|
|
||||||
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
|
|
||||||
cIn = append(cIn, "uintptr_t")
|
|
||||||
} else if p.Type == "int" {
|
|
||||||
if (i == 0 || i == 2) && funct == "fcntl" {
|
|
||||||
// These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
|
|
||||||
cIn = append(cIn, "uintptr_t")
|
|
||||||
} else {
|
|
||||||
cIn = append(cIn, "int")
|
|
||||||
}
|
|
||||||
|
|
||||||
} else if p.Type == "int32" {
|
|
||||||
cIn = append(cIn, "int")
|
|
||||||
} else if p.Type == "int64" {
|
|
||||||
cIn = append(cIn, "long long")
|
|
||||||
} else if p.Type == "uint32" {
|
|
||||||
cIn = append(cIn, "unsigned int")
|
|
||||||
} else if p.Type == "uint64" {
|
|
||||||
cIn = append(cIn, "unsigned long long")
|
|
||||||
} else {
|
|
||||||
cIn = append(cIn, "int")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !onlyCommon {
|
|
||||||
// GCCGO Prototype Generation
|
|
||||||
// Imports of system calls from libc
|
|
||||||
if sysname == "select" {
|
|
||||||
// select is a keyword of Go. Its name is
|
|
||||||
// changed to c_select.
|
|
||||||
cExtern += "#define c_select select\n"
|
|
||||||
}
|
|
||||||
cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
|
|
||||||
cIn := strings.Join(cIn, ", ")
|
|
||||||
cExtern += fmt.Sprintf("(%s);\n", cIn)
|
|
||||||
}
|
|
||||||
// GC Library name
|
|
||||||
if modname == "" {
|
|
||||||
modname = "libc.a/shr_64.o"
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
sysvarname := fmt.Sprintf("libc_%s", sysname)
|
|
||||||
|
|
||||||
if !onlyCommon {
|
|
||||||
// GC Runtime import of function to allow cross-platform builds.
|
|
||||||
dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname)
|
|
||||||
// GC Link symbol to proc address variable.
|
|
||||||
linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname)
|
|
||||||
// GC Library proc address variable.
|
|
||||||
vars = append(vars, sysvarname)
|
|
||||||
}
|
|
||||||
|
|
||||||
strconvfunc := "BytePtrFromString"
|
|
||||||
strconvtype := "*byte"
|
|
||||||
|
|
||||||
// Go function header.
|
|
||||||
if outps != "" {
|
|
||||||
outps = fmt.Sprintf(" (%s)", outps)
|
|
||||||
}
|
|
||||||
if textcommon != "" {
|
|
||||||
textcommon += "\n"
|
|
||||||
}
|
|
||||||
|
|
||||||
textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
|
|
||||||
|
|
||||||
// Prepare arguments to call.
|
|
||||||
var argscommon []string // Arguments in the common part
|
|
||||||
var argscall []string // Arguments for call prototype
|
|
||||||
var argsgc []string // Arguments for gc call (with syscall6)
|
|
||||||
var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall)
|
|
||||||
n := 0
|
|
||||||
argN := 0
|
|
||||||
for _, param := range in {
|
|
||||||
p := parseParam(param)
|
|
||||||
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
|
|
||||||
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name))
|
|
||||||
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
|
|
||||||
argsgc = append(argsgc, p.Name)
|
|
||||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
|
||||||
} else if p.Type == "string" && errvar != "" {
|
|
||||||
textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
|
|
||||||
textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
|
|
||||||
textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
|
|
||||||
|
|
||||||
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
|
|
||||||
argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n))
|
|
||||||
argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
|
|
||||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
|
|
||||||
n++
|
|
||||||
} else if p.Type == "string" {
|
|
||||||
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
|
|
||||||
textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
|
|
||||||
textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
|
|
||||||
textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
|
|
||||||
|
|
||||||
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
|
|
||||||
argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n))
|
|
||||||
argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
|
|
||||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
|
|
||||||
n++
|
|
||||||
} else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
|
|
||||||
// Convert slice into pointer, length.
|
|
||||||
// Have to be careful not to take address of &a[0] if len == 0:
|
|
||||||
// pass nil in that case.
|
|
||||||
textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
|
|
||||||
textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
|
|
||||||
argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name))
|
|
||||||
argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n))
|
|
||||||
argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n))
|
|
||||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n))
|
|
||||||
n++
|
|
||||||
} else if p.Type == "int64" && endianness != "" {
|
|
||||||
fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n")
|
|
||||||
} else if p.Type == "bool" {
|
|
||||||
fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. Case not yet implemented\n")
|
|
||||||
} else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" {
|
|
||||||
argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
|
|
||||||
argsgc = append(argsgc, p.Name)
|
|
||||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
|
||||||
} else if p.Type == "int" {
|
|
||||||
if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) {
|
|
||||||
// These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
|
|
||||||
argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
|
|
||||||
argsgc = append(argsgc, p.Name)
|
|
||||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
|
||||||
|
|
||||||
} else {
|
|
||||||
argscommon = append(argscommon, p.Name)
|
|
||||||
argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
|
|
||||||
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
|
|
||||||
}
|
|
||||||
} else if p.Type == "int32" {
|
|
||||||
argscommon = append(argscommon, p.Name)
|
|
||||||
argscall = append(argscall, fmt.Sprintf("%s int32", p.Name))
|
|
||||||
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
|
|
||||||
} else if p.Type == "int64" {
|
|
||||||
argscommon = append(argscommon, p.Name)
|
|
||||||
argscall = append(argscall, fmt.Sprintf("%s int64", p.Name))
|
|
||||||
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name))
|
|
||||||
} else if p.Type == "uint32" {
|
|
||||||
argscommon = append(argscommon, p.Name)
|
|
||||||
argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name))
|
|
||||||
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name))
|
|
||||||
} else if p.Type == "uint64" {
|
|
||||||
argscommon = append(argscommon, p.Name)
|
|
||||||
argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name))
|
|
||||||
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name))
|
|
||||||
} else if p.Type == "uintptr" {
|
|
||||||
argscommon = append(argscommon, p.Name)
|
|
||||||
argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
|
|
||||||
argsgc = append(argsgc, p.Name)
|
|
||||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
|
|
||||||
} else {
|
|
||||||
argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name))
|
|
||||||
argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
|
|
||||||
argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
|
|
||||||
}
|
|
||||||
argN++
|
|
||||||
}
|
|
||||||
nargs := len(argsgc)
|
|
||||||
|
|
||||||
// COMMON function generation
|
|
||||||
argscommonlist := strings.Join(argscommon, ", ")
|
|
||||||
callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist)
|
|
||||||
ret := []string{"_", "_"}
|
|
||||||
body := ""
|
|
||||||
doErrno := false
|
|
||||||
for i := 0; i < len(out); i++ {
|
|
||||||
p := parseParam(out[i])
|
|
||||||
reg := ""
|
|
||||||
if p.Name == "err" {
|
|
||||||
reg = "e1"
|
|
||||||
ret[1] = reg
|
|
||||||
doErrno = true
|
|
||||||
} else {
|
|
||||||
reg = "r0"
|
|
||||||
ret[0] = reg
|
|
||||||
}
|
|
||||||
if p.Type == "bool" {
|
|
||||||
reg = fmt.Sprintf("%s != 0", reg)
|
|
||||||
}
|
|
||||||
if reg != "e1" {
|
|
||||||
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ret[0] == "_" && ret[1] == "_" {
|
|
||||||
textcommon += fmt.Sprintf("\t%s\n", callcommon)
|
|
||||||
} else {
|
|
||||||
textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon)
|
|
||||||
}
|
|
||||||
textcommon += body
|
|
||||||
|
|
||||||
if doErrno {
|
|
||||||
textcommon += "\tif e1 != 0 {\n"
|
|
||||||
textcommon += "\t\terr = errnoErr(e1)\n"
|
|
||||||
textcommon += "\t}\n"
|
|
||||||
}
|
|
||||||
textcommon += "\treturn\n"
|
|
||||||
textcommon += "}\n"
|
|
||||||
|
|
||||||
if onlyCommon {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// CALL Prototype
|
|
||||||
callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", "))
|
|
||||||
|
|
||||||
// GC function generation
|
|
||||||
asm := "syscall6"
|
|
||||||
if nonblock != nil {
|
|
||||||
asm = "rawSyscall6"
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(argsgc) <= 6 {
|
|
||||||
for len(argsgc) < 6 {
|
|
||||||
argsgc = append(argsgc, "0")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
argsgclist := strings.Join(argsgc, ", ")
|
|
||||||
callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist)
|
|
||||||
|
|
||||||
textgc += callProto
|
|
||||||
textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc)
|
|
||||||
textgc += "\treturn\n}\n"
|
|
||||||
|
|
||||||
// GCCGO function generation
|
|
||||||
argsgccgolist := strings.Join(argsgccgo, ", ")
|
|
||||||
var callgccgo string
|
|
||||||
if sysname == "select" {
|
|
||||||
// select is a keyword of Go. Its name is
|
|
||||||
// changed to c_select.
|
|
||||||
callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist)
|
|
||||||
} else {
|
|
||||||
callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist)
|
|
||||||
}
|
|
||||||
textgccgo += callProto
|
|
||||||
textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo)
|
|
||||||
textgccgo += "\te1 = syscall.GetErrno()\n"
|
|
||||||
textgccgo += "\treturn\n}\n"
|
|
||||||
}
|
|
||||||
if err := s.Err(); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
file.Close()
|
|
||||||
}
|
|
||||||
imp := ""
|
|
||||||
if pack != "unix" {
|
|
||||||
imp = "import \"golang.org/x/sys/unix\"\n"
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print zsyscall_aix_ppc64.go
|
|
||||||
err := ioutil.WriteFile("zsyscall_aix_ppc64.go",
|
|
||||||
[]byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)),
|
|
||||||
0644)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print zsyscall_aix_ppc64_gc.go
|
|
||||||
vardecls := "\t" + strings.Join(vars, ",\n\t")
|
|
||||||
vardecls += " syscallFunc"
|
|
||||||
err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go",
|
|
||||||
[]byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)),
|
|
||||||
0644)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print zsyscall_aix_ppc64_gccgo.go
|
|
||||||
err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go",
|
|
||||||
[]byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)),
|
|
||||||
0644)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const srcTemplate1 = `// %s
|
|
||||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
|
||||||
|
|
||||||
// +build %s
|
|
||||||
|
|
||||||
package %s
|
|
||||||
|
|
||||||
import (
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
%s
|
|
||||||
|
|
||||||
%s
|
|
||||||
`
|
|
||||||
const srcTemplate2 = `// %s
|
|
||||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
|
||||||
|
|
||||||
// +build %s
|
|
||||||
// +build !gccgo
|
|
||||||
|
|
||||||
package %s
|
|
||||||
|
|
||||||
import (
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
%s
|
|
||||||
%s
|
|
||||||
%s
|
|
||||||
type syscallFunc uintptr
|
|
||||||
|
|
||||||
var (
|
|
||||||
%s
|
|
||||||
)
|
|
||||||
|
|
||||||
// Implemented in runtime/syscall_aix.go.
|
|
||||||
func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
|
|
||||||
func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
|
|
||||||
|
|
||||||
%s
|
|
||||||
`
|
|
||||||
const srcTemplate3 = `// %s
|
|
||||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
|
||||||
|
|
||||||
// +build %s
|
|
||||||
// +build gccgo
|
|
||||||
|
|
||||||
package %s
|
|
||||||
|
|
||||||
%s
|
|
||||||
*/
|
|
||||||
import "C"
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
%s
|
|
||||||
|
|
||||||
%s
|
|
||||||
`
@ -1,335 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
/*
|
|
||||||
This program reads a file containing function prototypes
|
|
||||||
(like syscall_solaris.go) and generates system call bodies.
|
|
||||||
The prototypes are marked by lines beginning with "//sys"
|
|
||||||
and read like func declarations if //sys is replaced by func, but:
|
|
||||||
* The parameter lists must give a name for each argument.
|
|
||||||
This includes return parameters.
|
|
||||||
* The parameter lists must give a type for each argument:
|
|
||||||
the (x, y, z int) shorthand is not allowed.
|
|
||||||
* If the return parameter is an error number, it must be named err.
|
|
||||||
* If the go func name needs to be different from its libc name,
|
|
||||||
* or the function is not in libc, the name can be specified
|
|
||||||
* at the end, after the "=" sign, like
|
|
||||||
//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
|
|
||||||
*/
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
b32 = flag.Bool("b32", false, "32bit big-endian")
|
|
||||||
l32 = flag.Bool("l32", false, "32bit little-endian")
|
|
||||||
tags = flag.String("tags", "", "build tags")
|
|
||||||
)
|
|
||||||
|
|
||||||
// cmdLine returns this program's command-line arguments
|
|
||||||
func cmdLine() string {
|
|
||||||
return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ")
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildTags returns build tags
|
|
||||||
func buildTags() string {
|
|
||||||
return *tags
|
|
||||||
}
|
|
||||||
|
|
||||||
// Param is function parameter
|
|
||||||
type Param struct {
|
|
||||||
Name string
|
|
||||||
Type string
|
|
||||||
}
|
|
||||||
|
|
||||||
// usage prints the program usage
|
|
||||||
func usage() {
|
|
||||||
fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseParamList parses parameter list and returns a slice of parameters
|
|
||||||
func parseParamList(list string) []string {
|
|
||||||
list = strings.TrimSpace(list)
|
|
||||||
if list == "" {
|
|
||||||
return []string{}
|
|
||||||
}
|
|
||||||
return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseParam splits a parameter into name and type
|
|
||||||
func parseParam(p string) Param {
|
|
||||||
ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
|
|
||||||
if ps == nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
return Param{ps[1], ps[2]}
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Usage = usage
|
|
||||||
flag.Parse()
|
|
||||||
if len(flag.Args()) <= 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
|
|
||||||
usage()
|
|
||||||
}
|
|
||||||
|
|
||||||
endianness := ""
|
|
||||||
if *b32 {
|
|
||||||
endianness = "big-endian"
|
|
||||||
} else if *l32 {
|
|
||||||
endianness = "little-endian"
|
|
||||||
}
|
|
||||||
|
|
||||||
pack := ""
|
|
||||||
text := ""
|
|
||||||
dynimports := ""
|
|
||||||
linknames := ""
|
|
||||||
var vars []string
|
|
||||||
for _, path := range flag.Args() {
|
|
||||||
file, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
s := bufio.NewScanner(file)
|
|
||||||
for s.Scan() {
|
|
||||||
t := s.Text()
|
|
||||||
t = strings.TrimSpace(t)
|
|
||||||
t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
|
|
||||||
if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
|
|
||||||
pack = p[1]
|
|
||||||
}
|
|
||||||
nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
|
|
||||||
if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Line must be of the form
|
|
||||||
// func Open(path string, mode int, perm int) (fd int, err error)
|
|
||||||
// Split into name, in params, out params.
|
|
||||||
f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
|
|
||||||
if f == nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
|
|
||||||
|
|
||||||
// Split argument lists on comma.
|
|
||||||
in := parseParamList(inps)
|
|
||||||
out := parseParamList(outps)
|
|
||||||
|
|
||||||
inps = strings.Join(in, ", ")
|
|
||||||
outps = strings.Join(out, ", ")
|
|
||||||
|
|
||||||
// Try in vain to keep people from editing this file.
|
|
||||||
// The theory is that they jump into the middle of the file
|
|
||||||
// without reading the header.
|
|
||||||
text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
|
|
||||||
|
|
||||||
// Shared object (.so) file name.
|
|
||||||
if modname == "" {
|
|
||||||
modname = "libc"
|
|
||||||
}
|
|
||||||
|
|
||||||
// System call name.
|
|
||||||
if sysname == "" {
|
|
||||||
sysname = funct
|
|
||||||
}
|
|
||||||
|
|
||||||
// System call pointer variable name.
|
|
||||||
sysvarname := fmt.Sprintf("proc%s", sysname)
|
|
||||||
|
|
||||||
strconvfunc := "BytePtrFromString"
|
|
||||||
strconvtype := "*byte"
|
|
||||||
|
|
||||||
sysname = strings.ToLower(sysname) // All libc functions are lowercase.
|
|
||||||
|
|
||||||
// Runtime import of function to allow cross-platform builds.
|
|
||||||
dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname)
|
|
||||||
// Link symbol to proc address variable.
|
|
||||||
linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname)
|
|
||||||
// Library proc address variable.
|
|
||||||
vars = append(vars, sysvarname)
|
|
||||||
|
|
||||||
// Go function header.
|
|
||||||
outlist := strings.Join(out, ", ")
|
|
||||||
if outlist != "" {
|
|
||||||
outlist = fmt.Sprintf(" (%s)", outlist)
|
|
||||||
}
|
|
||||||
if text != "" {
|
|
||||||
text += "\n"
|
|
||||||
}
|
|
||||||
text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist)
|
|
||||||
|
|
||||||
// Check if an err return is available
|
|
||||||
errvar := ""
|
|
||||||
for _, param := range out {
|
|
||||||
p := parseParam(param)
|
|
||||||
if p.Type == "error" {
|
|
||||||
errvar = p.Name
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prepare arguments to Syscall.
|
|
||||||
var args []string
|
|
||||||
n := 0
|
|
||||||
for _, param := range in {
|
|
||||||
p := parseParam(param)
|
|
||||||
if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
|
|
||||||
args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
|
|
||||||
} else if p.Type == "string" && errvar != "" {
|
|
||||||
text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
|
|
||||||
text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
|
|
||||||
text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
|
|
||||||
n++
|
|
||||||
} else if p.Type == "string" {
|
|
||||||
fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
|
|
||||||
text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
|
|
||||||
text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name)
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
|
|
||||||
n++
|
|
||||||
} else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil {
|
|
||||||
// Convert slice into pointer, length.
|
|
||||||
// Have to be careful not to take address of &a[0] if len == 0:
|
|
||||||
// pass nil in that case.
|
|
||||||
text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1])
|
|
||||||
text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
|
|
||||||
n++
|
|
||||||
} else if p.Type == "int64" && endianness != "" {
|
|
||||||
if endianness == "big-endian" {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
} else {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
|
|
||||||
}
|
|
||||||
} else if p.Type == "bool" {
|
|
||||||
text += fmt.Sprintf("\tvar _p%d uint32\n", n)
|
|
||||||
text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(_p%d)", n))
|
|
||||||
n++
|
|
||||||
} else {
|
|
||||||
args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
nargs := len(args)
|
|
||||||
|
|
||||||
// Determine which form to use; pad args with zeros.
|
|
||||||
asm := "sysvicall6"
|
|
||||||
if nonblock != nil {
|
|
||||||
asm = "rawSysvicall6"
|
|
||||||
}
|
|
||||||
if len(args) <= 6 {
|
|
||||||
for len(args) < 6 {
|
|
||||||
args = append(args, "0")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Actual call.
|
|
||||||
arglist := strings.Join(args, ", ")
|
|
||||||
call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist)
|
|
||||||
|
|
||||||
// Assign return values.
|
|
||||||
body := ""
|
|
||||||
ret := []string{"_", "_", "_"}
|
|
||||||
doErrno := false
|
|
||||||
for i := 0; i < len(out); i++ {
|
|
||||||
p := parseParam(out[i])
|
|
||||||
reg := ""
|
|
||||||
if p.Name == "err" {
|
|
||||||
reg = "e1"
|
|
||||||
ret[2] = reg
|
|
||||||
doErrno = true
|
|
||||||
} else {
|
|
||||||
reg = fmt.Sprintf("r%d", i)
|
|
||||||
ret[i] = reg
|
|
||||||
}
|
|
||||||
if p.Type == "bool" {
|
|
||||||
reg = fmt.Sprintf("%d != 0", reg)
|
|
||||||
}
|
|
||||||
if p.Type == "int64" && endianness != "" {
|
|
||||||
// 64-bit number in r1:r0 or r0:r1.
|
|
||||||
if i+2 > len(out) {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if endianness == "big-endian" {
|
|
||||||
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
|
|
||||||
} else {
|
|
||||||
reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
|
|
||||||
}
|
|
||||||
ret[i] = fmt.Sprintf("r%d", i)
|
|
||||||
ret[i+1] = fmt.Sprintf("r%d", i+1)
|
|
||||||
}
|
|
||||||
if reg != "e1" {
|
|
||||||
body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
|
|
||||||
text += fmt.Sprintf("\t%s\n", call)
|
|
||||||
} else {
|
|
||||||
text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
|
|
||||||
}
|
|
||||||
text += body
|
|
||||||
|
|
||||||
if doErrno {
|
|
||||||
text += "\tif e1 != 0 {\n"
|
|
||||||
text += "\t\terr = e1\n"
|
|
||||||
text += "\t}\n"
|
|
||||||
}
|
|
||||||
text += "\treturn\n"
|
|
||||||
text += "}\n"
|
|
||||||
}
|
|
||||||
if err := s.Err(); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
file.Close()
|
|
||||||
}
|
|
||||||
imp := ""
|
|
||||||
if pack != "unix" {
|
|
||||||
imp = "import \"golang.org/x/sys/unix\"\n"
|
|
||||||
|
|
||||||
}
|
|
||||||
vardecls := "\t" + strings.Join(vars, ",\n\t")
|
|
||||||
vardecls += " syscallFunc"
|
|
||||||
fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text)
|
|
||||||
}
|
|
||||||
|
|
||||||
const srcTemplate = `// %s
|
|
||||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
|
||||||
|
|
||||||
// +build %s
|
|
||||||
|
|
||||||
package %s
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
%s
|
|
||||||
%s
|
|
||||||
%s
|
|
||||||
var (
|
|
||||||
%s
|
|
||||||
)
|
|
||||||
|
|
||||||
%s
|
|
||||||
`
@ -1,355 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
// Parse the header files for OpenBSD and generate a Go usable sysctl MIB.
|
|
||||||
//
|
|
||||||
// Build a MIB with each entry being an array containing the level, type and
|
|
||||||
// a hash that will contain additional entries if the current entry is a node.
|
|
||||||
// We then walk this MIB and create a flattened sysctl name to OID hash.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
goos, goarch string
|
|
||||||
)
|
|
||||||
|
|
||||||
// cmdLine returns this program's command-line arguments.
|
|
||||||
func cmdLine() string {
|
|
||||||
return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ")
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildTags returns build tags.
|
|
||||||
func buildTags() string {
|
|
||||||
return fmt.Sprintf("%s,%s", goarch, goos)
|
|
||||||
}
|
|
||||||
|
|
||||||
// reMatch performs a regular expression match and stores the substring slice in the value pointed to by m.
|
|
||||||
func reMatch(re *regexp.Regexp, str string, m *[]string) bool {
|
|
||||||
*m = re.FindStringSubmatch(str)
|
|
||||||
if *m != nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
type nodeElement struct {
|
|
||||||
n int
|
|
||||||
t string
|
|
||||||
pE *map[string]nodeElement
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
debugEnabled bool
|
|
||||||
mib map[string]nodeElement
|
|
||||||
node *map[string]nodeElement
|
|
||||||
nodeMap map[string]string
|
|
||||||
sysCtl []string
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`)
|
|
||||||
ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`)
|
|
||||||
ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`)
|
|
||||||
netInetRE = regexp.MustCompile(`^netinet/`)
|
|
||||||
netInet6RE = regexp.MustCompile(`^netinet6/`)
|
|
||||||
netRE = regexp.MustCompile(`^net/`)
|
|
||||||
bracesRE = regexp.MustCompile(`{.*}`)
|
|
||||||
ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`)
|
|
||||||
fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`)
|
|
||||||
)
|
|
||||||
|
|
||||||
func debug(s string) {
|
|
||||||
if debugEnabled {
|
|
||||||
fmt.Fprintln(os.Stderr, s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Walk the MIB and build a sysctl name to OID mapping.
|
|
||||||
func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) {
|
|
||||||
lNode := pNode // local copy of pointer to node
|
|
||||||
var keys []string
|
|
||||||
for k := range *lNode {
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
sort.Strings(keys)
|
|
||||||
|
|
||||||
for _, key := range keys {
|
|
||||||
nodename := name
|
|
||||||
if name != "" {
|
|
||||||
nodename += "."
|
|
||||||
}
|
|
||||||
nodename += key
|
|
||||||
|
|
||||||
nodeoid := append(oid, (*pNode)[key].n)
|
|
||||||
|
|
||||||
if (*pNode)[key].t == `CTLTYPE_NODE` {
|
|
||||||
if _, ok := nodeMap[nodename]; ok {
|
|
||||||
lNode = &mib
|
|
||||||
ctlName := nodeMap[nodename]
|
|
||||||
for _, part := range strings.Split(ctlName, ".") {
|
|
||||||
lNode = ((*lNode)[part]).pE
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
lNode = (*pNode)[key].pE
|
|
||||||
}
|
|
||||||
buildSysctl(lNode, nodename, nodeoid)
|
|
||||||
} else if (*pNode)[key].t != "" {
|
|
||||||
oidStr := []string{}
|
|
||||||
for j := range nodeoid {
|
|
||||||
oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j]))
|
|
||||||
}
|
|
||||||
text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n"
|
|
||||||
sysCtl = append(sysCtl, text)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// Get the OS (using GOOS_TARGET if it exists)
|
|
||||||
goos = os.Getenv("GOOS_TARGET")
|
|
||||||
if goos == "" {
|
|
||||||
goos = os.Getenv("GOOS")
|
|
||||||
}
|
|
||||||
// Get the architecture (using GOARCH_TARGET if it exists)
|
|
||||||
goarch = os.Getenv("GOARCH_TARGET")
|
|
||||||
if goarch == "" {
|
|
||||||
goarch = os.Getenv("GOARCH")
|
|
||||||
}
|
|
||||||
// Check if GOOS and GOARCH environment variables are defined
|
|
||||||
if goarch == "" || goos == "" {
|
|
||||||
fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
mib = make(map[string]nodeElement)
|
|
||||||
headers := [...]string{
|
|
||||||
`sys/sysctl.h`,
|
|
||||||
`sys/socket.h`,
|
|
||||||
`sys/tty.h`,
|
|
||||||
`sys/malloc.h`,
|
|
||||||
`sys/mount.h`,
|
|
||||||
`sys/namei.h`,
|
|
||||||
`sys/sem.h`,
|
|
||||||
`sys/shm.h`,
|
|
||||||
`sys/vmmeter.h`,
|
|
||||||
`uvm/uvmexp.h`,
|
|
||||||
`uvm/uvm_param.h`,
|
|
||||||
`uvm/uvm_swap_encrypt.h`,
|
|
||||||
`ddb/db_var.h`,
|
|
||||||
`net/if.h`,
|
|
||||||
`net/if_pfsync.h`,
|
|
||||||
`net/pipex.h`,
|
|
||||||
`netinet/in.h`,
|
|
||||||
`netinet/icmp_var.h`,
|
|
||||||
`netinet/igmp_var.h`,
|
|
||||||
`netinet/ip_ah.h`,
|
|
||||||
`netinet/ip_carp.h`,
|
|
||||||
`netinet/ip_divert.h`,
|
|
||||||
`netinet/ip_esp.h`,
|
|
||||||
`netinet/ip_ether.h`,
|
|
||||||
`netinet/ip_gre.h`,
|
|
||||||
`netinet/ip_ipcomp.h`,
|
|
||||||
`netinet/ip_ipip.h`,
|
|
||||||
`netinet/pim_var.h`,
|
|
||||||
`netinet/tcp_var.h`,
|
|
||||||
`netinet/udp_var.h`,
|
|
||||||
`netinet6/in6.h`,
|
|
||||||
`netinet6/ip6_divert.h`,
|
|
||||||
`netinet6/pim6_var.h`,
|
|
||||||
`netinet/icmp6.h`,
|
|
||||||
`netmpls/mpls.h`,
|
|
||||||
}
|
|
||||||
|
|
||||||
ctls := [...]string{
|
|
||||||
`kern`,
|
|
||||||
`vm`,
|
|
||||||
`fs`,
|
|
||||||
`net`,
|
|
||||||
//debug /* Special handling required */
|
|
||||||
`hw`,
|
|
||||||
//machdep /* Arch specific */
|
|
||||||
`user`,
|
|
||||||
`ddb`,
|
|
||||||
//vfs /* Special handling required */
|
|
||||||
`fs.posix`,
|
|
||||||
`kern.forkstat`,
|
|
||||||
`kern.intrcnt`,
|
|
||||||
`kern.malloc`,
|
|
||||||
`kern.nchstats`,
|
|
||||||
`kern.seminfo`,
|
|
||||||
`kern.shminfo`,
|
|
||||||
`kern.timecounter`,
|
|
||||||
`kern.tty`,
|
|
||||||
`kern.watchdog`,
|
|
||||||
`net.bpf`,
|
|
||||||
`net.ifq`,
|
|
||||||
`net.inet`,
|
|
||||||
`net.inet.ah`,
|
|
||||||
`net.inet.carp`,
|
|
||||||
`net.inet.divert`,
|
|
||||||
`net.inet.esp`,
|
|
||||||
`net.inet.etherip`,
|
|
||||||
`net.inet.gre`,
|
|
||||||
`net.inet.icmp`,
|
|
||||||
`net.inet.igmp`,
|
|
||||||
`net.inet.ip`,
|
|
||||||
`net.inet.ip.ifq`,
|
|
||||||
`net.inet.ipcomp`,
|
|
||||||
`net.inet.ipip`,
|
|
||||||
`net.inet.mobileip`,
|
|
||||||
`net.inet.pfsync`,
|
|
||||||
`net.inet.pim`,
|
|
||||||
`net.inet.tcp`,
|
|
||||||
`net.inet.udp`,
|
|
||||||
`net.inet6`,
|
|
||||||
`net.inet6.divert`,
|
|
||||||
`net.inet6.ip6`,
|
|
||||||
`net.inet6.icmp6`,
|
|
||||||
`net.inet6.pim6`,
|
|
||||||
`net.inet6.tcp6`,
|
|
||||||
`net.inet6.udp6`,
|
|
||||||
`net.mpls`,
|
|
||||||
`net.mpls.ifq`,
|
|
||||||
`net.key`,
|
|
||||||
`net.pflow`,
|
|
||||||
`net.pfsync`,
|
|
||||||
`net.pipex`,
|
|
||||||
`net.rt`,
|
|
||||||
`vm.swapencrypt`,
|
|
||||||
//vfsgenctl /* Special handling required */
|
|
||||||
}
|
|
||||||
|
|
||||||
// Node name "fixups"
|
|
||||||
ctlMap := map[string]string{
|
|
||||||
"ipproto": "net.inet",
|
|
||||||
"net.inet.ipproto": "net.inet",
|
|
||||||
"net.inet6.ipv6proto": "net.inet6",
|
|
||||||
"net.inet6.ipv6": "net.inet6.ip6",
|
|
||||||
"net.inet.icmpv6": "net.inet6.icmp6",
|
|
||||||
"net.inet6.divert6": "net.inet6.divert",
|
|
||||||
"net.inet6.tcp6": "net.inet.tcp",
|
|
||||||
"net.inet6.udp6": "net.inet.udp",
|
|
||||||
"mpls": "net.mpls",
|
|
||||||
"swpenc": "vm.swapencrypt",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Node mappings
|
|
||||||
nodeMap = map[string]string{
|
|
||||||
"net.inet.ip.ifq": "net.ifq",
|
|
||||||
"net.inet.pfsync": "net.pfsync",
|
|
||||||
"net.mpls.ifq": "net.ifq",
|
|
||||||
}
|
|
||||||
|
|
||||||
mCtls := make(map[string]bool)
|
|
||||||
for _, ctl := range ctls {
|
|
||||||
mCtls[ctl] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, header := range headers {
|
|
||||||
debug("Processing " + header)
|
|
||||||
file, err := os.Open(filepath.Join("/usr/include", header))
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
s := bufio.NewScanner(file)
|
|
||||||
for s.Scan() {
|
|
||||||
var sub []string
|
|
||||||
if reMatch(ctlNames1RE, s.Text(), &sub) ||
|
|
||||||
reMatch(ctlNames2RE, s.Text(), &sub) ||
|
|
||||||
reMatch(ctlNames3RE, s.Text(), &sub) {
|
|
||||||
if sub[1] == `CTL_NAMES` {
|
|
||||||
// Top level.
|
|
||||||
node = &mib
|
|
||||||
} else {
|
|
||||||
// Node.
|
|
||||||
nodename := strings.ToLower(sub[2])
|
|
||||||
ctlName := ""
|
|
||||||
if reMatch(netInetRE, header, &sub) {
|
|
||||||
ctlName = "net.inet." + nodename
|
|
||||||
} else if reMatch(netInet6RE, header, &sub) {
|
|
||||||
ctlName = "net.inet6." + nodename
|
|
||||||
} else if reMatch(netRE, header, &sub) {
|
|
||||||
ctlName = "net." + nodename
|
|
||||||
} else {
|
|
||||||
ctlName = nodename
|
|
||||||
ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`)
|
|
||||||
}
|
|
||||||
|
|
||||||
if val, ok := ctlMap[ctlName]; ok {
|
|
||||||
ctlName = val
|
|
||||||
}
|
|
||||||
if _, ok := mCtls[ctlName]; !ok {
|
|
||||||
debug("Ignoring " + ctlName + "...")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Walk down from the top of the MIB.
|
|
||||||
node = &mib
|
|
||||||
for _, part := range strings.Split(ctlName, ".") {
|
|
||||||
if _, ok := (*node)[part]; !ok {
|
|
||||||
debug("Missing node " + part)
|
|
||||||
(*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}}
|
|
||||||
}
|
|
||||||
node = (*node)[part].pE
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Populate current node with entries.
|
|
||||||
i := -1
|
|
||||||
for !strings.HasPrefix(s.Text(), "}") {
|
|
||||||
s.Scan()
|
|
||||||
if reMatch(bracesRE, s.Text(), &sub) {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if !reMatch(ctlTypeRE, s.Text(), &sub) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
(*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = s.Err()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
file.Close()
|
|
||||||
}
|
|
||||||
buildSysctl(&mib, "", []int{})
|
|
||||||
|
|
||||||
sort.Strings(sysCtl)
|
|
||||||
text := strings.Join(sysCtl, "")
|
|
||||||
|
|
||||||
fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
|
|
||||||
}
|
|
||||||
|
|
||||||
const srcTemplate = `// %s
|
|
||||||
// Code generated by the command above; DO NOT EDIT.
|
|
||||||
|
|
||||||
// +build %s
|
|
||||||
|
|
||||||
package unix
|
|
||||||
|
|
||||||
type mibentry struct {
|
|
||||||
ctlname string
|
|
||||||
ctloid []_C_int
|
|
||||||
}
|
|
||||||
|
|
||||||
var sysctlMib = []mibentry {
|
|
||||||
%s
|
|
||||||
}
|
|
||||||
`
@ -1,190 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
// Generate system call table for DragonFly, NetBSD,
|
|
||||||
// FreeBSD, OpenBSD or Darwin from master list
|
|
||||||
// (for example, /usr/src/sys/kern/syscalls.master or
|
|
||||||
// sys/syscall.h).
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
goos, goarch string
|
|
||||||
)
|
|
||||||
|
|
||||||
// cmdLine returns this program's command-line arguments
|
|
||||||
func cmdLine() string {
|
|
||||||
return "go run mksysnum.go " + strings.Join(os.Args[1:], " ")
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildTags returns build tags
|
|
||||||
func buildTags() string {
|
|
||||||
return fmt.Sprintf("%s,%s", goarch, goos)
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkErr(err error) {
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// source string and substring slice for regexp
|
|
||||||
type re struct {
|
|
||||||
str string // source string
|
|
||||||
sub []string // matched sub-string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Match performs regular expression match
|
|
||||||
func (r *re) Match(exp string) bool {
|
|
||||||
r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str)
|
|
||||||
if r.sub != nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchFile fetches a text file from URL
|
|
||||||
func fetchFile(URL string) io.Reader {
|
|
||||||
resp, err := http.Get(URL)
|
|
||||||
checkErr(err)
|
|
||||||
defer resp.Body.Close()
|
|
||||||
body, err := ioutil.ReadAll(resp.Body)
|
|
||||||
checkErr(err)
|
|
||||||
return strings.NewReader(string(body))
|
|
||||||
}
|
|
||||||
|
|
||||||
// readFile reads a text file from path
|
|
||||||
func readFile(path string) io.Reader {
|
|
||||||
file, err := os.Open(path)
|
|
||||||
checkErr(err)
|
|
||||||
return file
|
|
||||||
}
|
|
||||||
|
|
||||||
func format(name, num, proto string) string {
|
|
||||||
name = strings.ToUpper(name)
|
|
||||||
// There are multiple entries for enosys and nosys, so comment them out.
|
|
||||||
nm := re{str: name}
|
|
||||||
if nm.Match(`^SYS_E?NOSYS$`) {
|
|
||||||
name = fmt.Sprintf("// %s", name)
|
|
||||||
}
|
|
||||||
if name == `SYS_SYS_EXIT` {
|
|
||||||
name = `SYS_EXIT`
|
|
||||||
}
|
|
||||||
return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// Get the OS (using GOOS_TARGET if it exists)
|
|
||||||
goos = os.Getenv("GOOS_TARGET")
|
|
||||||
if goos == "" {
|
|
||||||
goos = os.Getenv("GOOS")
|
|
||||||
}
|
|
||||||
// Get the architecture (using GOARCH_TARGET if it exists)
|
|
||||||
goarch = os.Getenv("GOARCH_TARGET")
|
|
||||||
if goarch == "" {
|
|
||||||
goarch = os.Getenv("GOARCH")
|
|
||||||
}
|
|
||||||
// Check if GOOS and GOARCH environment variables are defined
|
|
||||||
if goarch == "" || goos == "" {
|
|
||||||
fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
file := strings.TrimSpace(os.Args[1])
|
|
||||||
var syscalls io.Reader
|
|
||||||
if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") {
|
|
||||||
// Download syscalls.master file
|
|
||||||
syscalls = fetchFile(file)
|
|
||||||
} else {
|
|
||||||
syscalls = readFile(file)
|
|
||||||
}
|
|
||||||
|
|
||||||
var text, line string
|
|
||||||
s := bufio.NewScanner(syscalls)
|
|
||||||
for s.Scan() {
|
|
||||||
t := re{str: line}
|
|
||||||
if t.Match(`^(.*)\\$`) {
|
|
||||||
// Handle continuation
|
|
||||||
line = t.sub[1]
|
|
||||||
line += strings.TrimLeft(s.Text(), " \t")
|
|
||||||
} else {
|
|
||||||
// New line
|
|
||||||
line = s.Text()
|
|
||||||
}
|
|
||||||
t = re{str: line}
|
|
||||||
if t.Match(`\\$`) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
t = re{str: line}
|
|
||||||
|
|
||||||
switch goos {
|
|
||||||
case "dragonfly":
|
|
||||||
if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) {
|
|
||||||
num, proto := t.sub[1], t.sub[2]
|
|
||||||
name := fmt.Sprintf("SYS_%s", t.sub[3])
|
|
||||||
text += format(name, num, proto)
|
|
||||||
}
|
|
||||||
case "freebsd":
|
|
||||||
if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) {
|
|
||||||
num, proto := t.sub[1], t.sub[2]
|
|
||||||
name := fmt.Sprintf("SYS_%s", t.sub[3])
|
|
||||||
text += format(name, num, proto)
|
|
||||||
}
|
|
||||||
case "openbsd":
|
|
||||||
if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) {
|
|
||||||
num, proto, name := t.sub[1], t.sub[3], t.sub[4]
|
|
||||||
text += format(name, num, proto)
|
|
||||||
}
|
|
||||||
case "netbsd":
|
|
||||||
if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) {
|
|
||||||
num, proto, compat := t.sub[1], t.sub[6], t.sub[8]
|
|
||||||
name := t.sub[7] + "_" + t.sub[9]
|
|
||||||
if t.sub[11] != "" {
|
|
||||||
name = t.sub[7] + "_" + t.sub[11]
|
|
||||||
}
|
|
||||||
name = strings.ToUpper(name)
|
|
||||||
if compat == "" || compat == "13" || compat == "30" || compat == "50" {
|
|
||||||
text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "darwin":
|
|
||||||
if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) {
|
|
||||||
name, num := t.sub[1], t.sub[2]
|
|
||||||
name = strings.ToUpper(name)
|
|
||||||
text += fmt.Sprintf(" SYS_%s = %s;\n", name, num)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos)
|
|
||||||
os.Exit(1)
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err := s.Err()
|
|
||||||
checkErr(err)
|
|
||||||
|
|
||||||
fmt.Printf(template, cmdLine(), buildTags(), text)
|
|
||||||
}
|
|
||||||
|
|
||||||
const template = `// %s
|
|
||||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
|
||||||
|
|
||||||
// +build %s
|
|
||||||
|
|
||||||
package unix
|
|
||||||
|
|
||||||
const(
|
|
||||||
%s)`
@ -1,237 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
// +build aix
|
|
||||||
|
|
||||||
/*
|
|
||||||
Input to cgo -godefs. See also mkerrors.sh and mkall.sh
|
|
||||||
*/
|
|
||||||
|
|
||||||
// +godefs map struct_in_addr [4]byte /* in_addr */
|
|
||||||
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
|
||||||
|
|
||||||
package unix
|
|
||||||
|
|
||||||
/*
|
|
||||||
#include <sys/types.h>
|
|
||||||
#include <sys/time.h>
|
|
||||||
#include <sys/limits.h>
|
|
||||||
#include <sys/un.h>
|
|
||||||
#include <utime.h>
|
|
||||||
#include <sys/utsname.h>
|
|
||||||
#include <sys/poll.h>
|
|
||||||
#include <sys/resource.h>
|
|
||||||
#include <sys/stat.h>
|
|
||||||
#include <sys/statfs.h>
|
|
||||||
#include <sys/termio.h>
|
|
||||||
#include <sys/ioctl.h>
|
|
||||||
|
|
||||||
#include <termios.h>
|
|
||||||
|
|
||||||
#include <net/if.h>
|
|
||||||
#include <net/if_dl.h>
|
|
||||||
#include <netinet/in.h>
|
|
||||||
#include <netinet/icmp6.h>
|
|
||||||
|
|
||||||
|
|
||||||
#include <dirent.h>
|
|
||||||
#include <fcntl.h>
|
|
||||||
|
|
||||||
enum {
|
|
||||||
sizeofPtr = sizeof(void*),
|
|
||||||
};
|
|
||||||
|
|
||||||
union sockaddr_all {
|
|
||||||
struct sockaddr s1; // this one gets used for fields
|
|
||||||
struct sockaddr_in s2; // these pad it out
|
|
||||||
struct sockaddr_in6 s3;
|
|
||||||
struct sockaddr_un s4;
|
|
||||||
struct sockaddr_dl s5;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct sockaddr_any {
|
|
||||||
struct sockaddr addr;
|
|
||||||
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
|
|
||||||
};
|
|
||||||
|
|
||||||
*/
|
|
||||||
import "C"
|
|
||||||
|
|
||||||
// Machine characteristics
|
|
||||||
|
|
||||||
const (
|
|
||||||
SizeofPtr = C.sizeofPtr
|
|
||||||
SizeofShort = C.sizeof_short
|
|
||||||
SizeofInt = C.sizeof_int
|
|
||||||
SizeofLong = C.sizeof_long
|
|
||||||
SizeofLongLong = C.sizeof_longlong
|
|
||||||
PathMax = C.PATH_MAX
|
|
||||||
)
|
|
||||||
|
|
||||||
// Basic types
|
|
||||||
|
|
||||||
type (
|
|
||||||
_C_short C.short
|
|
||||||
_C_int C.int
|
|
||||||
_C_long C.long
|
|
||||||
_C_long_long C.longlong
|
|
||||||
)
|
|
||||||
|
|
||||||
type off64 C.off64_t
|
|
||||||
type off C.off_t
|
|
||||||
type Mode_t C.mode_t
|
|
||||||
|
|
||||||
// Time
|
|
||||||
|
|
||||||
type Timespec C.struct_timespec
|
|
||||||
|
|
||||||
type Timeval C.struct_timeval
|
|
||||||
|
|
||||||
type Timeval32 C.struct_timeval32
|
|
||||||
|
|
||||||
type Timex C.struct_timex
|
|
||||||
|
|
||||||
type Time_t C.time_t
|
|
||||||
|
|
||||||
type Tms C.struct_tms
|
|
||||||
|
|
||||||
type Utimbuf C.struct_utimbuf
|
|
||||||
|
|
||||||
type Timezone C.struct_timezone
|
|
||||||
|
|
||||||
// Processes
|
|
||||||
|
|
||||||
type Rusage C.struct_rusage
|
|
||||||
|
|
||||||
type Rlimit C.struct_rlimit64
|
|
||||||
|
|
||||||
type Pid_t C.pid_t
|
|
||||||
|
|
||||||
type _Gid_t C.gid_t
|
|
||||||
|
|
||||||
type dev_t C.dev_t
|
|
||||||
|
|
||||||
// Files
|
|
||||||
|
|
||||||
type Stat_t C.struct_stat
|
|
||||||
|
|
||||||
type StatxTimestamp C.struct_statx_timestamp
|
|
||||||
|
|
||||||
type Statx_t C.struct_statx
|
|
||||||
|
|
||||||
type Dirent C.struct_dirent
|
|
||||||
|
|
||||||
// Sockets
|
|
||||||
|
|
||||||
type RawSockaddrInet4 C.struct_sockaddr_in
|
|
||||||
|
|
||||||
type RawSockaddrInet6 C.struct_sockaddr_in6
|
|
||||||
|
|
||||||
type RawSockaddrUnix C.struct_sockaddr_un
|
|
||||||
|
|
||||||
type RawSockaddrDatalink C.struct_sockaddr_dl
|
|
||||||
|
|
||||||
type RawSockaddr C.struct_sockaddr
|
|
||||||
|
|
||||||
type RawSockaddrAny C.struct_sockaddr_any
|
|
||||||
|
|
||||||
type _Socklen C.socklen_t
|
|
||||||
|
|
||||||
type Cmsghdr C.struct_cmsghdr
|
|
||||||
|
|
||||||
type ICMPv6Filter C.struct_icmp6_filter
|
|
||||||
|
|
||||||
type Iovec C.struct_iovec
|
|
||||||
|
|
||||||
type IPMreq C.struct_ip_mreq
|
|
||||||
|
|
||||||
type IPv6Mreq C.struct_ipv6_mreq
|
|
||||||
|
|
||||||
type IPv6MTUInfo C.struct_ip6_mtuinfo
|
|
||||||
|
|
||||||
type Linger C.struct_linger
|
|
||||||
|
|
||||||
type Msghdr C.struct_msghdr
|
|
||||||
|
|
||||||
const (
|
|
||||||
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
|
|
||||||
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
|
||||||
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
|
|
||||||
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
|
|
||||||
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
|
|
||||||
SizeofLinger = C.sizeof_struct_linger
|
|
||||||
SizeofIPMreq = C.sizeof_struct_ip_mreq
|
|
||||||
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
|
||||||
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
|
|
||||||
SizeofMsghdr = C.sizeof_struct_msghdr
|
|
||||||
SizeofCmsghdr = C.sizeof_struct_cmsghdr
|
|
||||||
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
|
||||||
)
|
|
||||||
|
|
||||||
// Routing and interface messages
|
|
||||||
|
|
||||||
const (
|
|
||||||
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
|
|
||||||
)
|
|
||||||
|
|
||||||
type IfMsgHdr C.struct_if_msghdr
|
|
||||||
|
|
||||||
// Misc
|
|
||||||
|
|
||||||
type FdSet C.fd_set
|
|
||||||
|
|
||||||
type Utsname C.struct_utsname
|
|
||||||
|
|
||||||
type Ustat_t C.struct_ustat
|
|
||||||
|
|
||||||
type Sigset_t C.sigset_t
|
|
||||||
|
|
||||||
const (
|
|
||||||
AT_FDCWD = C.AT_FDCWD
|
|
||||||
AT_REMOVEDIR = C.AT_REMOVEDIR
|
|
||||||
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
|
|
||||||
)
|
|
||||||
|
|
||||||
// Terminal handling
|
|
||||||
|
|
||||||
type Termios C.struct_termios
|
|
||||||
|
|
||||||
type Termio C.struct_termio
|
|
||||||
|
|
||||||
type Winsize C.struct_winsize
|
|
||||||
|
|
||||||
//poll
|
|
||||||
|
|
||||||
type PollFd struct {
|
|
||||||
Fd int32
|
|
||||||
Events uint16
|
|
||||||
Revents uint16
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
POLLERR = C.POLLERR
|
|
||||||
POLLHUP = C.POLLHUP
|
|
||||||
POLLIN = C.POLLIN
|
|
||||||
POLLNVAL = C.POLLNVAL
|
|
||||||
POLLOUT = C.POLLOUT
|
|
||||||
POLLPRI = C.POLLPRI
|
|
||||||
POLLRDBAND = C.POLLRDBAND
|
|
||||||
POLLRDNORM = C.POLLRDNORM
|
|
||||||
POLLWRBAND = C.POLLWRBAND
|
|
||||||
POLLWRNORM = C.POLLWRNORM
|
|
||||||
)
|
|
||||||
|
|
||||||
//flock_t
|
|
||||||
|
|
||||||
type Flock_t C.struct_flock64
|
|
||||||
|
|
||||||
// Statfs
|
|
||||||
|
|
||||||
type Fsid_t C.struct_fsid_t
|
|
||||||
type Fsid64_t C.struct_fsid64_t
|
|
||||||
|
|
||||||
type Statfs_t C.struct_statfs
|
|
||||||
|
|
||||||
const RNDGETENTCNT = 0x80045200
@ -1,283 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
/*
|
|
||||||
Input to cgo -godefs. See README.md
|
|
||||||
*/
|
|
||||||
|
|
||||||
// +godefs map struct_in_addr [4]byte /* in_addr */
|
|
||||||
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
|
||||||
|
|
||||||
package unix
|
|
||||||
|
|
||||||
/*
|
|
||||||
#define __DARWIN_UNIX03 0
|
|
||||||
#define KERNEL
|
|
||||||
#define _DARWIN_USE_64_BIT_INODE
|
|
||||||
#include <dirent.h>
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <poll.h>
|
|
||||||
#include <signal.h>
|
|
||||||
#include <termios.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
#include <mach/mach.h>
|
|
||||||
#include <mach/message.h>
|
|
||||||
#include <sys/event.h>
|
|
||||||
#include <sys/mman.h>
|
|
||||||
#include <sys/mount.h>
|
|
||||||
#include <sys/param.h>
|
|
||||||
#include <sys/ptrace.h>
|
|
||||||
#include <sys/resource.h>
|
|
||||||
#include <sys/select.h>
|
|
||||||
#include <sys/signal.h>
|
|
||||||
#include <sys/socket.h>
|
|
||||||
#include <sys/stat.h>
|
|
||||||
#include <sys/time.h>
|
|
||||||
#include <sys/types.h>
|
|
||||||
#include <sys/uio.h>
|
|
||||||
#include <sys/un.h>
|
|
||||||
#include <sys/utsname.h>
|
|
||||||
#include <sys/wait.h>
|
|
||||||
#include <net/bpf.h>
|
|
||||||
#include <net/if.h>
|
|
||||||
#include <net/if_dl.h>
|
|
||||||
#include <net/if_var.h>
|
|
||||||
#include <net/route.h>
|
|
||||||
#include <netinet/in.h>
|
|
||||||
#include <netinet/icmp6.h>
|
|
||||||
#include <netinet/tcp.h>
|
|
||||||
|
|
||||||
enum {
|
|
||||||
sizeofPtr = sizeof(void*),
|
|
||||||
};
|
|
||||||
|
|
||||||
union sockaddr_all {
|
|
||||||
struct sockaddr s1; // this one gets used for fields
|
|
||||||
struct sockaddr_in s2; // these pad it out
|
|
||||||
struct sockaddr_in6 s3;
|
|
||||||
struct sockaddr_un s4;
|
|
||||||
struct sockaddr_dl s5;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct sockaddr_any {
|
|
||||||
struct sockaddr addr;
|
|
||||||
char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
|
|
||||||
};
|
|
||||||
|
|
||||||
*/
|
|
||||||
import "C"
|
|
||||||
|
|
||||||
// Machine characteristics
|
|
||||||
|
|
||||||
const (
|
|
||||||
SizeofPtr = C.sizeofPtr
|
|
||||||
SizeofShort = C.sizeof_short
|
|
||||||
SizeofInt = C.sizeof_int
|
|
||||||
SizeofLong = C.sizeof_long
|
|
||||||
SizeofLongLong = C.sizeof_longlong
|
|
||||||
)
|
|
||||||
|
|
||||||
// Basic types
|
|
||||||
|
|
||||||
type (
|
|
||||||
_C_short C.short
|
|
||||||
_C_int C.int
|
|
||||||
_C_long C.long
|
|
||||||
_C_long_long C.longlong
|
|
||||||
)
|
|
||||||
|
|
||||||
// Time
|
|
||||||
|
|
||||||
type Timespec C.struct_timespec
|
|
||||||
|
|
||||||
type Timeval C.struct_timeval
|
|
||||||
|
|
||||||
type Timeval32 C.struct_timeval32
|
|
||||||
|
|
||||||
// Processes
|
|
||||||
|
|
||||||
type Rusage C.struct_rusage
|
|
||||||
|
|
||||||
type Rlimit C.struct_rlimit
|
|
||||||
|
|
||||||
type _Gid_t C.gid_t
|
|
||||||
|
|
||||||
// Files
|
|
||||||
|
|
||||||
type Stat_t C.struct_stat64
|
|
||||||
|
|
||||||
type Statfs_t C.struct_statfs64
|
|
||||||
|
|
||||||
type Flock_t C.struct_flock
|
|
||||||
|
|
||||||
type Fstore_t C.struct_fstore
|
|
||||||
|
|
||||||
type Radvisory_t C.struct_radvisory
|
|
||||||
|
|
||||||
type Fbootstraptransfer_t C.struct_fbootstraptransfer
|
|
||||||
|
|
||||||
type Log2phys_t C.struct_log2phys
|
|
||||||
|
|
||||||
type Fsid C.struct_fsid
|
|
||||||
|
|
||||||
type Dirent C.struct_dirent
|
|
||||||
|
|
||||||
// Sockets
|
|
||||||
|
|
||||||
type RawSockaddrInet4 C.struct_sockaddr_in
|
|
||||||
|
|
||||||
type RawSockaddrInet6 C.struct_sockaddr_in6
|
|
||||||
|
|
||||||
type RawSockaddrUnix C.struct_sockaddr_un
|
|
||||||
|
|
||||||
type RawSockaddrDatalink C.struct_sockaddr_dl
|
|
||||||
|
|
||||||
type RawSockaddr C.struct_sockaddr
|
|
||||||
|
|
||||||
type RawSockaddrAny C.struct_sockaddr_any
|
|
||||||
|
|
||||||
type _Socklen C.socklen_t
|
|
||||||
|
|
||||||
type Linger C.struct_linger
|
|
||||||
|
|
||||||
type Iovec C.struct_iovec
|
|
||||||
|
|
||||||
type IPMreq C.struct_ip_mreq
|
|
||||||
|
|
||||||
type IPv6Mreq C.struct_ipv6_mreq
|
|
||||||
|
|
||||||
type Msghdr C.struct_msghdr
|
|
||||||
|
|
||||||
type Cmsghdr C.struct_cmsghdr
|
|
||||||
|
|
||||||
type Inet4Pktinfo C.struct_in_pktinfo
|
|
||||||
|
|
||||||
type Inet6Pktinfo C.struct_in6_pktinfo
|
|
||||||
|
|
||||||
type IPv6MTUInfo C.struct_ip6_mtuinfo
|
|
||||||
|
|
||||||
type ICMPv6Filter C.struct_icmp6_filter
|
|
||||||
|
|
||||||
const (
|
|
||||||
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
|
|
||||||
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
|
||||||
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
|
|
||||||
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
|
|
||||||
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
|
|
||||||
SizeofLinger = C.sizeof_struct_linger
|
|
||||||
SizeofIPMreq = C.sizeof_struct_ip_mreq
|
|
||||||
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
|
||||||
SizeofMsghdr = C.sizeof_struct_msghdr
|
|
||||||
SizeofCmsghdr = C.sizeof_struct_cmsghdr
|
|
||||||
SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
|
|
||||||
SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
|
||||||
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
|
|
||||||
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
|
||||||
)
|
|
||||||
|
|
||||||
// Ptrace requests
|
|
||||||
|
|
||||||
const (
|
|
||||||
PTRACE_TRACEME = C.PT_TRACE_ME
|
|
||||||
PTRACE_CONT = C.PT_CONTINUE
|
|
||||||
PTRACE_KILL = C.PT_KILL
|
|
||||||
)
|
|
||||||
|
|
||||||
// Events (kqueue, kevent)
|
|
||||||
|
|
||||||
type Kevent_t C.struct_kevent
|
|
||||||
|
|
||||||
// Select
|
|
||||||
|
|
||||||
type FdSet C.fd_set
|
|
||||||
|
|
||||||
// Routing and interface messages
|
|
||||||
|
|
||||||
const (
|
|
||||||
SizeofIfMsghdr = C.sizeof_struct_if_msghdr
|
|
||||||
SizeofIfData = C.sizeof_struct_if_data
|
|
||||||
SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
|
|
||||||
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
|
|
||||||
SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2
|
|
||||||
SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
|
|
||||||
SizeofRtMetrics = C.sizeof_struct_rt_metrics
|
|
||||||
)
|
|
||||||
|
|
||||||
type IfMsghdr C.struct_if_msghdr
|
|
||||||
|
|
||||||
type IfData C.struct_if_data
|
|
||||||
|
|
||||||
type IfaMsghdr C.struct_ifa_msghdr
|
|
||||||
|
|
||||||
type IfmaMsghdr C.struct_ifma_msghdr
|
|
||||||
|
|
||||||
type IfmaMsghdr2 C.struct_ifma_msghdr2
|
|
||||||
|
|
||||||
type RtMsghdr C.struct_rt_msghdr
|
|
||||||
|
|
||||||
type RtMetrics C.struct_rt_metrics
|
|
||||||
|
|
||||||
// Berkeley packet filter
|
|
||||||
|
|
||||||
const (
|
|
||||||
SizeofBpfVersion = C.sizeof_struct_bpf_version
|
|
||||||
SizeofBpfStat = C.sizeof_struct_bpf_stat
|
|
||||||
SizeofBpfProgram = C.sizeof_struct_bpf_program
|
|
||||||
SizeofBpfInsn = C.sizeof_struct_bpf_insn
|
|
||||||
SizeofBpfHdr = C.sizeof_struct_bpf_hdr
|
|
||||||
)
|
|
||||||
|
|
||||||
type BpfVersion C.struct_bpf_version
|
|
||||||
|
|
||||||
type BpfStat C.struct_bpf_stat
|
|
||||||
|
|
||||||
type BpfProgram C.struct_bpf_program
|
|
||||||
|
|
||||||
type BpfInsn C.struct_bpf_insn
|
|
||||||
|
|
||||||
type BpfHdr C.struct_bpf_hdr
|
|
||||||
|
|
||||||
// Terminal handling
|
|
||||||
|
|
||||||
type Termios C.struct_termios
|
|
||||||
|
|
||||||
type Winsize C.struct_winsize
|
|
||||||
|
|
||||||
// fchmodat-like syscalls.
|
|
||||||
|
|
||||||
const (
|
|
||||||
AT_FDCWD = C.AT_FDCWD
|
|
||||||
AT_REMOVEDIR = C.AT_REMOVEDIR
|
|
||||||
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
|
|
||||||
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
|
|
||||||
)
|
|
||||||
|
|
||||||
// poll
|
|
||||||
|
|
||||||
type PollFd C.struct_pollfd
|
|
||||||
|
|
||||||
const (
|
|
||||||
POLLERR = C.POLLERR
|
|
||||||
POLLHUP = C.POLLHUP
|
|
||||||
POLLIN = C.POLLIN
|
|
||||||
POLLNVAL = C.POLLNVAL
|
|
||||||
POLLOUT = C.POLLOUT
|
|
||||||
POLLPRI = C.POLLPRI
|
|
||||||
POLLRDBAND = C.POLLRDBAND
|
|
||||||
POLLRDNORM = C.POLLRDNORM
|
|
||||||
POLLWRBAND = C.POLLWRBAND
|
|
||||||
POLLWRNORM = C.POLLWRNORM
|
|
||||||
)
|
|
||||||
|
|
||||||
// uname
|
|
||||||
|
|
||||||
type Utsname C.struct_utsname
|
|
||||||
|
|
||||||
// Clockinfo
|
|
||||||
|
|
||||||
const SizeofClockinfo = C.sizeof_struct_clockinfo
|
|
||||||
|
|
||||||
type Clockinfo C.struct_clockinfo
|
|
|
@@ -1,263 +0,0 @@
(deleted file, 263 lines: cgo -godefs input for package unix on another BSD target — the same pattern of C includes and Go definitions for machine sizes, basic C types, time, processes, files, file-system limits, sockets, ptrace requests, kqueue events, select, routing and interface messages (including interface-announce messages), the Berkeley packet filter, terminal handling, fchmodat-style flags, poll, and uname)
@@ -1,400 +0,0 @@
(deleted file, 400 lines: cgo -godefs input for package unix on FreeBSD — _WANT_FREEBSD11_* compatibility defines, sys/capsicum.h, duplicated if_data8/if_msghdr8 structs for FreeBSD 8-STABLE, and Go definitions for machine sizes, basic C types, time, processes, files, fadvise constants, sockets, an extended set of ptrace requests, kqueue events, select, routing and interface messages, the Berkeley packet filter including zero-copy buffers, terminal handling, fchmodat-style flags, poll, capability rights, and uname)
@@ -1,290 +0,0 @@
(deleted file, 290 lines: cgo -godefs input for package unix — C includes plus Go definitions for machine sizes, basic C types, time, processes, files, file-system limits, fadvise constants, sockets, ptrace requests, kqueue events, select, routing and interface messages (including Mclpool), the Berkeley packet filter, terminal handling (including Ptmget), fchmodat-style flags, poll, sysctl (Sysctlnode), uname, and clockinfo)
@@ -1,283 +0,0 @@
(deleted file, 283 lines: cgo -godefs input for package unix — C includes, among them uvm/uvmexp.h, plus Go definitions for machine sizes, basic C types, time, processes, files, file-system limits, sockets, ptrace requests, kqueue events, select, routing and interface messages (including Mclpool), the Berkeley packet filter, terminal handling, fchmodat-style flags, poll, signal sets, uname, Uvmexp, and clockinfo)
@@ -1,266 +0,0 @@
(deleted file, 266 lines: cgo -godefs input for package unix on Solaris — __USE_SUNOS_SOCKETS__ and __USE_LEGACY_PROTOTYPES__ compatibility defines, C includes, and Go definitions for machine sizes, basic C types, time, processes, files, statvfs, sockets, select, uname and ustat, AT_* flags, routing and interface messages, the Berkeley packet filter, terminal handling, and poll)
@@ -1,556 +0,0 @@
(deleted file, 556 lines: a "+build ignore" table generator (package main) for golang.org/x/text 8-bit character encodings — it lists IBM code pages, ISO 8859 variants, KOI8-R/U, Macintosh, and Windows code pages together with their mapping-table URLs at source.icu-project.org and encoding.spec.whatwg.org; the encoding list continues below)
||||||
"Windows1257",
|
|
||||||
encoding.ASCIISub,
|
|
||||||
"http://encoding.spec.whatwg.org/index-windows-1257.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"Windows 1258",
|
|
||||||
"Windows1258",
|
|
||||||
"",
|
|
||||||
"Windows1258",
|
|
||||||
encoding.ASCIISub,
|
|
||||||
"http://encoding.spec.whatwg.org/index-windows-1258.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"X-User-Defined",
|
|
||||||
"XUserDefined",
|
|
||||||
"It is defined at http://encoding.spec.whatwg.org/#x-user-defined",
|
|
||||||
"XUserDefined",
|
|
||||||
encoding.ASCIISub,
|
|
||||||
ascii +
|
|
||||||
"\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" +
|
|
||||||
"\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" +
|
|
||||||
"\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" +
|
|
||||||
"\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" +
|
|
||||||
"\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" +
|
|
||||||
"\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" +
|
|
||||||
"\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" +
|
|
||||||
"\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" +
|
|
||||||
"\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" +
|
|
||||||
"\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" +
|
|
||||||
"\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" +
|
|
||||||
"\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" +
|
|
||||||
"\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" +
|
|
||||||
"\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" +
|
|
||||||
"\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" +
|
|
||||||
"\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func getWHATWG(url string) string {
|
|
||||||
res, err := http.Get(url)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("%q: Get: %v", url, err)
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
|
|
||||||
mapping := make([]rune, 128)
|
|
||||||
for i := range mapping {
|
|
||||||
mapping[i] = '\ufffd'
|
|
||||||
}
|
|
||||||
|
|
||||||
scanner := bufio.NewScanner(res.Body)
|
|
||||||
for scanner.Scan() {
|
|
||||||
s := strings.TrimSpace(scanner.Text())
|
|
||||||
if s == "" || s[0] == '#' {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
x, y := 0, 0
|
|
||||||
if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil {
|
|
||||||
log.Fatalf("could not parse %q", s)
|
|
||||||
}
|
|
||||||
if x < 0 || 128 <= x {
|
|
||||||
log.Fatalf("code %d is out of range", x)
|
|
||||||
}
|
|
||||||
if 0x80 <= y && y < 0xa0 {
|
|
||||||
// We diverge from the WHATWG spec by mapping control characters
|
|
||||||
// in the range [0x80, 0xa0) to U+FFFD.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
mapping[x] = rune(y)
|
|
||||||
}
|
|
||||||
return ascii + string(mapping)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getUCM(url string) string {
|
|
||||||
res, err := http.Get(url)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("%q: Get: %v", url, err)
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
|
|
||||||
mapping := make([]rune, 256)
|
|
||||||
for i := range mapping {
|
|
||||||
mapping[i] = '\ufffd'
|
|
||||||
}
|
|
||||||
|
|
||||||
charsFound := 0
|
|
||||||
scanner := bufio.NewScanner(res.Body)
|
|
||||||
for scanner.Scan() {
|
|
||||||
s := strings.TrimSpace(scanner.Text())
|
|
||||||
if s == "" || s[0] == '#' {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var c byte
|
|
||||||
var r rune
|
|
||||||
if _, err := fmt.Sscanf(s, `<U%x> \x%x |0`, &r, &c); err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
mapping[c] = r
|
|
||||||
charsFound++
|
|
||||||
}
|
|
||||||
|
|
||||||
if charsFound < 200 {
|
|
||||||
log.Fatalf("%q: only %d characters found (wrong page format?)", url, charsFound)
|
|
||||||
}
|
|
||||||
|
|
||||||
return string(mapping)
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
mibs := map[string]bool{}
|
|
||||||
all := []string{}
|
|
||||||
|
|
||||||
w := gen.NewCodeWriter()
|
|
||||||
defer w.WriteGoFile("tables.go", "charmap")
|
|
||||||
|
|
||||||
printf := func(s string, a ...interface{}) { fmt.Fprintf(w, s, a...) }
|
|
||||||
|
|
||||||
printf("import (\n")
|
|
||||||
printf("\t\"golang.org/x/text/encoding\"\n")
|
|
||||||
printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n")
|
|
||||||
printf(")\n\n")
|
|
||||||
for _, e := range encodings {
|
|
||||||
varNames := strings.Split(e.varName, ",")
|
|
||||||
all = append(all, varNames...)
|
|
||||||
varName := varNames[0]
|
|
||||||
switch {
|
|
||||||
case strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/"):
|
|
||||||
e.mapping = getWHATWG(e.mapping)
|
|
||||||
case strings.HasPrefix(e.mapping, "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/"):
|
|
||||||
e.mapping = getUCM(e.mapping)
|
|
||||||
}
|
|
||||||
|
|
||||||
asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00
|
|
||||||
if asciiSuperset {
|
|
||||||
low = 0x80
|
|
||||||
}
|
|
||||||
lvn := 1
|
|
||||||
if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") {
|
|
||||||
lvn = 3
|
|
||||||
}
|
|
||||||
lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:]
|
|
||||||
printf("// %s is the %s encoding.\n", varName, e.name)
|
|
||||||
if e.comment != "" {
|
|
||||||
printf("//\n// %s\n", e.comment)
|
|
||||||
}
|
|
||||||
printf("var %s *Charmap = &%s\n\nvar %s = Charmap{\nname: %q,\n",
|
|
||||||
varName, lowerVarName, lowerVarName, e.name)
|
|
||||||
if mibs[e.mib] {
|
|
||||||
log.Fatalf("MIB type %q declared multiple times.", e.mib)
|
|
||||||
}
|
|
||||||
printf("mib: identifier.%s,\n", e.mib)
|
|
||||||
printf("asciiSuperset: %t,\n", asciiSuperset)
|
|
||||||
printf("low: 0x%02x,\n", low)
|
|
||||||
printf("replacement: 0x%02x,\n", e.replacement)
|
|
||||||
|
|
||||||
printf("decode: [256]utf8Enc{\n")
|
|
||||||
i, backMapping := 0, map[rune]byte{}
|
|
||||||
for _, c := range e.mapping {
|
|
||||||
if _, ok := backMapping[c]; !ok && c != utf8.RuneError {
|
|
||||||
backMapping[c] = byte(i)
|
|
||||||
}
|
|
||||||
var buf [8]byte
|
|
||||||
n := utf8.EncodeRune(buf[:], c)
|
|
||||||
if n > 3 {
|
|
||||||
panic(fmt.Sprintf("rune %q (%U) is too long", c, c))
|
|
||||||
}
|
|
||||||
printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2])
|
|
||||||
if i%2 == 1 {
|
|
||||||
printf("\n")
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
printf("},\n")
|
|
||||||
|
|
||||||
printf("encode: [256]uint32{\n")
|
|
||||||
encode := make([]uint32, 0, 256)
|
|
||||||
for c, i := range backMapping {
|
|
||||||
encode = append(encode, uint32(i)<<24|uint32(c))
|
|
||||||
}
|
|
||||||
sort.Sort(byRune(encode))
|
|
||||||
for len(encode) < cap(encode) {
|
|
||||||
encode = append(encode, encode[len(encode)-1])
|
|
||||||
}
|
|
||||||
for i, enc := range encode {
|
|
||||||
printf("0x%08x,", enc)
|
|
||||||
if i%8 == 7 {
|
|
||||||
printf("\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
printf("},\n}\n")
|
|
||||||
|
|
||||||
// Add an estimate of the size of a single Charmap{} struct value, which
|
|
||||||
// includes two 256 elem arrays of 4 bytes and some extra fields, which
|
|
||||||
// align to 3 uint64s on 64-bit architectures.
|
|
||||||
w.Size += 2*4*256 + 3*8
|
|
||||||
}
|
|
||||||
// TODO: add proper line breaking.
|
|
||||||
printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n"))
|
|
||||||
}
|
|
||||||
|
|
||||||
type byRune []uint32
|
|
||||||
|
|
||||||
func (b byRune) Len() int { return len(b) }
|
|
||||||
func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff }
|
|
||||||
func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
|
|
@@ -1,173 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

import (
"bytes"
"encoding/json"
"fmt"
"log"
"strings"

"golang.org/x/text/internal/gen"
)

type group struct {
Encodings []struct {
Labels []string
Name string
}
}

func main() {
gen.Init()

r := gen.Open("https://encoding.spec.whatwg.org", "whatwg", "encodings.json")
var groups []group
if err := json.NewDecoder(r).Decode(&groups); err != nil {
log.Fatalf("Error reading encodings.json: %v", err)
}

w := &bytes.Buffer{}
fmt.Fprintln(w, "type htmlEncoding byte")
fmt.Fprintln(w, "const (")
for i, g := range groups {
for _, e := range g.Encodings {
key := strings.ToLower(e.Name)
name := consts[key]
if name == "" {
log.Fatalf("No const defined for %s.", key)
}
if i == 0 {
fmt.Fprintf(w, "%s htmlEncoding = iota\n", name)
} else {
fmt.Fprintf(w, "%s\n", name)
}
}
}
fmt.Fprintln(w, "numEncodings")
fmt.Fprint(w, ")\n\n")

fmt.Fprintln(w, "var canonical = [numEncodings]string{")
for _, g := range groups {
for _, e := range g.Encodings {
fmt.Fprintf(w, "%q,\n", strings.ToLower(e.Name))
}
}
fmt.Fprint(w, "}\n\n")

fmt.Fprintln(w, "var nameMap = map[string]htmlEncoding{")
for _, g := range groups {
for _, e := range g.Encodings {
for _, l := range e.Labels {
key := strings.ToLower(e.Name)
name := consts[key]
fmt.Fprintf(w, "%q: %s,\n", l, name)
}
}
}
fmt.Fprint(w, "}\n\n")

var tags []string
fmt.Fprintln(w, "var localeMap = []htmlEncoding{")
for _, loc := range locales {
tags = append(tags, loc.tag)
fmt.Fprintf(w, "%s, // %s \n", consts[loc.name], loc.tag)
}
fmt.Fprint(w, "}\n\n")

fmt.Fprintf(w, "const locales = %q\n", strings.Join(tags, " "))

gen.WriteGoFile("tables.go", "htmlindex", w.Bytes())
}

// consts maps canonical encoding name to internal constant.
var consts = map[string]string{
"utf-8": "utf8",
"ibm866": "ibm866",
"iso-8859-2": "iso8859_2",
"iso-8859-3": "iso8859_3",
"iso-8859-4": "iso8859_4",
"iso-8859-5": "iso8859_5",
"iso-8859-6": "iso8859_6",
"iso-8859-7": "iso8859_7",
"iso-8859-8": "iso8859_8",
"iso-8859-8-i": "iso8859_8I",
"iso-8859-10": "iso8859_10",
"iso-8859-13": "iso8859_13",
"iso-8859-14": "iso8859_14",
"iso-8859-15": "iso8859_15",
"iso-8859-16": "iso8859_16",
"koi8-r": "koi8r",
"koi8-u": "koi8u",
"macintosh": "macintosh",
"windows-874": "windows874",
"windows-1250": "windows1250",
"windows-1251": "windows1251",
"windows-1252": "windows1252",
"windows-1253": "windows1253",
"windows-1254": "windows1254",
"windows-1255": "windows1255",
"windows-1256": "windows1256",
"windows-1257": "windows1257",
"windows-1258": "windows1258",
"x-mac-cyrillic": "macintoshCyrillic",
"gbk": "gbk",
"gb18030": "gb18030",
// "hz-gb-2312": "hzgb2312", // Was removed from WhatWG
"big5": "big5",
"euc-jp": "eucjp",
"iso-2022-jp": "iso2022jp",
"shift_jis": "shiftJIS",
"euc-kr": "euckr",
"replacement": "replacement",
"utf-16be": "utf16be",
"utf-16le": "utf16le",
"x-user-defined": "xUserDefined",
}

// locales is taken from
// https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm.
var locales = []struct{ tag, name string }{
// The default value. Explicitly state latin to benefit from the exact
// script option, while still making 1252 the default encoding for languages
// written in Latin script.
{"und_Latn", "windows-1252"},
{"ar", "windows-1256"},
{"ba", "windows-1251"},
{"be", "windows-1251"},
{"bg", "windows-1251"},
{"cs", "windows-1250"},
{"el", "iso-8859-7"},
{"et", "windows-1257"},
{"fa", "windows-1256"},
{"he", "windows-1255"},
{"hr", "windows-1250"},
{"hu", "iso-8859-2"},
{"ja", "shift_jis"},
{"kk", "windows-1251"},
{"ko", "euc-kr"},
{"ku", "windows-1254"},
{"ky", "windows-1251"},
{"lt", "windows-1257"},
{"lv", "windows-1257"},
{"mk", "windows-1251"},
{"pl", "iso-8859-2"},
{"ru", "windows-1251"},
{"sah", "windows-1251"},
{"sk", "windows-1250"},
{"sl", "iso-8859-2"},
{"sr", "windows-1251"},
{"tg", "windows-1251"},
{"th", "windows-874"},
{"tr", "windows-1254"},
{"tt", "windows-1251"},
{"uk", "windows-1251"},
{"vi", "windows-1258"},
{"zh-hans", "gb18030"},
{"zh-hant", "big5"},
}
@@ -1,142 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

import (
"bytes"
"encoding/xml"
"fmt"
"io"
"log"
"strings"

"golang.org/x/text/internal/gen"
)

type registry struct {
XMLName xml.Name `xml:"registry"`
Updated string `xml:"updated"`
Registry []struct {
ID string `xml:"id,attr"`
Record []struct {
Name string `xml:"name"`
Xref []struct {
Type string `xml:"type,attr"`
Data string `xml:"data,attr"`
} `xml:"xref"`
Desc struct {
Data string `xml:",innerxml"`
// Any []struct {
// Data string `xml:",chardata"`
// } `xml:",any"`
// Data string `xml:",chardata"`
} `xml:"description,"`
MIB string `xml:"value"`
Alias []string `xml:"alias"`
MIME string `xml:"preferred_alias"`
} `xml:"record"`
} `xml:"registry"`
}

func main() {
r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml")
reg := &registry{}
if err := xml.NewDecoder(r).Decode(&reg); err != nil && err != io.EOF {
log.Fatalf("Error decoding charset registry: %v", err)
}
if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" {
log.Fatalf("Unexpected ID %s", reg.Registry[0].ID)
}

w := &bytes.Buffer{}
fmt.Fprintf(w, "const (\n")
for _, rec := range reg.Registry[0].Record {
constName := ""
for _, a := range rec.Alias {
if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 {
// Some of the constant definitions have comments in them. Strip those.
constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0])
}
}
if constName == "" {
switch rec.MIB {
case "2085":
constName = "HZGB2312" // Not listed as alias for some reason.
default:
log.Fatalf("No cs alias defined for %s.", rec.MIB)
}
}
if rec.MIME != "" {
rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME)
}
fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME)
if len(rec.Desc.Data) > 0 {
fmt.Fprint(w, "// ")
d := xml.NewDecoder(strings.NewReader(rec.Desc.Data))
inElem := true
attr := ""
for {
t, err := d.Token()
if err != nil {
if err != io.EOF {
log.Fatal(err)
}
break
}
switch x := t.(type) {
case xml.CharData:
attr = "" // Don't need attribute info.
a := bytes.Split([]byte(x), []byte("\n"))
for i, b := range a {
if b = bytes.TrimSpace(b); len(b) != 0 {
if !inElem && i > 0 {
fmt.Fprint(w, "\n// ")
}
inElem = false
fmt.Fprintf(w, "%s ", string(b))
}
}
case xml.StartElement:
if x.Name.Local == "xref" {
inElem = true
use := false
for _, a := range x.Attr {
if a.Name.Local == "type" {
use = use || a.Value != "person"
}
if a.Name.Local == "data" && use {
// Patch up URLs to use https. From some links, the
// https version is different from the http one.
s := a.Value
s = strings.Replace(s, "http://", "https://", -1)
s = strings.Replace(s, "/unicode/", "/", -1)
attr = s + " "
}
}
}
case xml.EndElement:
inElem = false
fmt.Fprint(w, attr)
}
}
fmt.Fprint(w, "\n")
}
for _, x := range rec.Xref {
switch x.Type {
case "rfc":
fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data))
case "uri":
fmt.Fprintf(w, "// Reference: %s\n", x.Data)
}
}
fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB)
fmt.Fprintln(w)
}
fmt.Fprintln(w, ")")

gen.WriteGoFile("mib.go", "identifier", w.Bytes())
}
@@ -1,161 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

// This program generates tables.go:
// go run maketables.go | gofmt > tables.go

// TODO: Emoji extensions?
// https://www.unicode.org/faq/emoji_dingbats.html
// https://www.unicode.org/Public/UNIDATA/EmojiSources.txt

import (
"bufio"
"fmt"
"log"
"net/http"
"sort"
"strings"
)

type entry struct {
jisCode, table int
}

func main() {
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
fmt.Printf("// Package japanese provides Japanese encodings such as EUC-JP and Shift JIS.\n")
fmt.Printf(`package japanese // import "golang.org/x/text/encoding/japanese"` + "\n\n")

reverse := [65536]entry{}
for i := range reverse {
reverse[i].table = -1
}

tables := []struct {
url string
name string
}{
{"http://encoding.spec.whatwg.org/index-jis0208.txt", "0208"},
{"http://encoding.spec.whatwg.org/index-jis0212.txt", "0212"},
}
for i, table := range tables {
res, err := http.Get(table.url)
if err != nil {
log.Fatalf("%q: Get: %v", table.url, err)
}
defer res.Body.Close()

mapping := [65536]uint16{}

scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if s == "" || s[0] == '#' {
continue
}
x, y := 0, uint16(0)
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
log.Fatalf("%q: could not parse %q", table.url, s)
}
if x < 0 || 120*94 <= x {
log.Fatalf("%q: JIS code %d is out of range", table.url, x)
}
mapping[x] = y
if reverse[y].table == -1 {
reverse[y] = entry{jisCode: x, table: i}
}
}
if err := scanner.Err(); err != nil {
log.Fatalf("%q: scanner error: %v", table.url, err)
}

fmt.Printf("// jis%sDecode is the decoding table from JIS %s code to Unicode.\n// It is defined at %s\n",
table.name, table.name, table.url)
fmt.Printf("var jis%sDecode = [...]uint16{\n", table.name)
for i, m := range mapping {
if m != 0 {
fmt.Printf("\t%d: 0x%04X,\n", i, m)
}
}
fmt.Printf("}\n\n")
}

// Any run of at least separation continuous zero entries in the reverse map will
// be a separate encode table.
const separation = 1024

intervals := []interval(nil)
low, high := -1, -1
for i, v := range reverse {
if v.table == -1 {
continue
}
if low < 0 {
low = i
} else if i-high >= separation {
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
low = i
}
high = i + 1
}
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
sort.Sort(byDecreasingLength(intervals))

fmt.Printf("const (\n")
fmt.Printf("\tjis0208 = 1\n")
fmt.Printf("\tjis0212 = 2\n")
fmt.Printf("\tcodeMask = 0x7f\n")
fmt.Printf("\tcodeShift = 7\n")
fmt.Printf("\ttableShift = 14\n")
fmt.Printf(")\n\n")

fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
fmt.Printf("// encodeX are the encoding tables from Unicode to JIS code,\n")
fmt.Printf("// sorted by decreasing length.\n")
for i, v := range intervals {
fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
}
fmt.Printf("//\n")
fmt.Printf("// The high two bits of the value record whether the JIS code comes from the\n")
fmt.Printf("// JIS0208 table (high bits == 1) or the JIS0212 table (high bits == 2).\n")
fmt.Printf("// The low 14 bits are two 7-bit unsigned integers j1 and j2 that form the\n")
fmt.Printf("// JIS code (94*j1 + j2) within that table.\n")
fmt.Printf("\n")

for i, v := range intervals {
fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
fmt.Printf("var encode%d = [...]uint16{\n", i)
for j := v.low; j < v.high; j++ {
x := reverse[j]
if x.table == -1 {
continue
}
fmt.Printf("\t%d - %d: jis%s<<14 | 0x%02X<<7 | 0x%02X,\n",
j, v.low, tables[x.table].name, x.jisCode/94, x.jisCode%94)
}
fmt.Printf("}\n\n")
}
}

// interval is a half-open interval [low, high).
type interval struct {
low, high int
}

func (i interval) len() int { return i.high - i.low }

// byDecreasingLength sorts intervals by decreasing length.
type byDecreasingLength []interval

func (b byDecreasingLength) Len() int { return len(b) }
func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
@@ -1,143 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

// This program generates tables.go:
// go run maketables.go | gofmt > tables.go

import (
"bufio"
"fmt"
"log"
"net/http"
"sort"
"strings"
)

func main() {
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
fmt.Printf("// Package korean provides Korean encodings such as EUC-KR.\n")
fmt.Printf(`package korean // import "golang.org/x/text/encoding/korean"` + "\n\n")

res, err := http.Get("http://encoding.spec.whatwg.org/index-euc-kr.txt")
if err != nil {
log.Fatalf("Get: %v", err)
}
defer res.Body.Close()

mapping := [65536]uint16{}
reverse := [65536]uint16{}

scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if s == "" || s[0] == '#' {
continue
}
x, y := uint16(0), uint16(0)
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
log.Fatalf("could not parse %q", s)
}
if x < 0 || 178*(0xc7-0x81)+(0xfe-0xc7)*94+(0xff-0xa1) <= x {
log.Fatalf("EUC-KR code %d is out of range", x)
}
mapping[x] = y
if reverse[y] == 0 {
c0, c1 := uint16(0), uint16(0)
if x < 178*(0xc7-0x81) {
c0 = uint16(x/178) + 0x81
c1 = uint16(x % 178)
switch {
case c1 < 1*26:
c1 += 0x41
case c1 < 2*26:
c1 += 0x47
default:
c1 += 0x4d
}
} else {
x -= 178 * (0xc7 - 0x81)
c0 = uint16(x/94) + 0xc7
c1 = uint16(x%94) + 0xa1
}
reverse[y] = c0<<8 | c1
}
}
if err := scanner.Err(); err != nil {
log.Fatalf("scanner error: %v", err)
}

fmt.Printf("// decode is the decoding table from EUC-KR code to Unicode.\n")
fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-euc-kr.txt\n")
fmt.Printf("var decode = [...]uint16{\n")
for i, v := range mapping {
if v != 0 {
fmt.Printf("\t%d: 0x%04X,\n", i, v)
}
}
fmt.Printf("}\n\n")

// Any run of at least separation continuous zero entries in the reverse map will
// be a separate encode table.
const separation = 1024

intervals := []interval(nil)
low, high := -1, -1
for i, v := range reverse {
if v == 0 {
continue
}
if low < 0 {
low = i
} else if i-high >= separation {
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
low = i
}
high = i + 1
}
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
sort.Sort(byDecreasingLength(intervals))

fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
fmt.Printf("// encodeX are the encoding tables from Unicode to EUC-KR code,\n")
fmt.Printf("// sorted by decreasing length.\n")
for i, v := range intervals {
fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
}
fmt.Printf("\n")

for i, v := range intervals {
fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
fmt.Printf("var encode%d = [...]uint16{\n", i)
for j := v.low; j < v.high; j++ {
x := reverse[j]
if x == 0 {
continue
}
fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
}
fmt.Printf("}\n\n")
}
}

// interval is a half-open interval [low, high).
type interval struct {
low, high int
}

func (i interval) len() int { return i.high - i.low }

// byDecreasingLength sorts intervals by decreasing length.
type byDecreasingLength []interval

func (b byDecreasingLength) Len() int { return len(b) }
func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
@@ -1,161 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

// This program generates tables.go:
// go run maketables.go | gofmt > tables.go

import (
"bufio"
"fmt"
"log"
"net/http"
"sort"
"strings"
)

func main() {
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
fmt.Printf("// Package simplifiedchinese provides Simplified Chinese encodings such as GBK.\n")
fmt.Printf(`package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese"` + "\n\n")

printGB18030()
printGBK()
}

func printGB18030() {
res, err := http.Get("http://encoding.spec.whatwg.org/index-gb18030.txt")
if err != nil {
log.Fatalf("Get: %v", err)
}
defer res.Body.Close()

fmt.Printf("// gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt\n")
fmt.Printf("var gb18030 = [...][2]uint16{\n")
scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if s == "" || s[0] == '#' {
continue
}
x, y := uint32(0), uint32(0)
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
log.Fatalf("could not parse %q", s)
}
if x < 0x10000 && y < 0x10000 {
fmt.Printf("\t{0x%04x, 0x%04x},\n", x, y)
}
}
fmt.Printf("}\n\n")
}

func printGBK() {
res, err := http.Get("http://encoding.spec.whatwg.org/index-gbk.txt")
if err != nil {
log.Fatalf("Get: %v", err)
}
defer res.Body.Close()

mapping := [65536]uint16{}
reverse := [65536]uint16{}

scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if s == "" || s[0] == '#' {
continue
}
x, y := uint16(0), uint16(0)
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
log.Fatalf("could not parse %q", s)
}
if x < 0 || 126*190 <= x {
log.Fatalf("GBK code %d is out of range", x)
}
mapping[x] = y
if reverse[y] == 0 {
c0, c1 := x/190, x%190
if c1 >= 0x3f {
c1++
}
reverse[y] = (0x81+c0)<<8 | (0x40 + c1)
}
}
if err := scanner.Err(); err != nil {
log.Fatalf("scanner error: %v", err)
}

fmt.Printf("// decode is the decoding table from GBK code to Unicode.\n")
fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-gbk.txt\n")
fmt.Printf("var decode = [...]uint16{\n")
for i, v := range mapping {
if v != 0 {
fmt.Printf("\t%d: 0x%04X,\n", i, v)
}
}
fmt.Printf("}\n\n")

// Any run of at least separation continuous zero entries in the reverse map will
// be a separate encode table.
const separation = 1024

intervals := []interval(nil)
low, high := -1, -1
for i, v := range reverse {
if v == 0 {
continue
}
if low < 0 {
low = i
} else if i-high >= separation {
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
low = i
}
high = i + 1
}
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
sort.Sort(byDecreasingLength(intervals))

fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
fmt.Printf("// encodeX are the encoding tables from Unicode to GBK code,\n")
fmt.Printf("// sorted by decreasing length.\n")
for i, v := range intervals {
fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
}
fmt.Printf("\n")

for i, v := range intervals {
fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
fmt.Printf("var encode%d = [...]uint16{\n", i)
for j := v.low; j < v.high; j++ {
x := reverse[j]
if x == 0 {
continue
}
fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
}
fmt.Printf("}\n\n")
}
}

// interval is a half-open interval [low, high).
type interval struct {
low, high int
}

func (i interval) len() int { return i.high - i.low }

// byDecreasingLength sorts intervals by decreasing length.
type byDecreasingLength []interval

func (b byDecreasingLength) Len() int { return len(b) }
func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
@@ -1,140 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

// This program generates tables.go:
// go run maketables.go | gofmt > tables.go

import (
"bufio"
"fmt"
"log"
"net/http"
"sort"
"strings"
)

func main() {
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
fmt.Printf("// Package traditionalchinese provides Traditional Chinese encodings such as Big5.\n")
fmt.Printf(`package traditionalchinese // import "golang.org/x/text/encoding/traditionalchinese"` + "\n\n")

res, err := http.Get("http://encoding.spec.whatwg.org/index-big5.txt")
if err != nil {
log.Fatalf("Get: %v", err)
}
defer res.Body.Close()

mapping := [65536]uint32{}
reverse := [65536 * 4]uint16{}

scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if s == "" || s[0] == '#' {
continue
}
x, y := uint16(0), uint32(0)
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
log.Fatalf("could not parse %q", s)
}
if x < 0 || 126*157 <= x {
log.Fatalf("Big5 code %d is out of range", x)
}
mapping[x] = y

// The WHATWG spec http://encoding.spec.whatwg.org/#indexes says that
// "The index pointer for code point in index is the first pointer
// corresponding to code point in index", which would normally mean
// that the code below should be guarded by "if reverse[y] == 0", but
// last instead of first seems to match the behavior of
// "iconv -f UTF-8 -t BIG5". For example, U+8005 者 occurs twice in
// http://encoding.spec.whatwg.org/index-big5.txt, as index 2148
// (encoded as "\x8e\xcd") and index 6543 (encoded as "\xaa\xcc")
// and "echo 者 | iconv -f UTF-8 -t BIG5 | xxd" gives "\xaa\xcc".
c0, c1 := x/157, x%157
if c1 < 0x3f {
c1 += 0x40
} else {
c1 += 0x62
}
reverse[y] = (0x81+c0)<<8 | c1
}
if err := scanner.Err(); err != nil {
log.Fatalf("scanner error: %v", err)
}

fmt.Printf("// decode is the decoding table from Big5 code to Unicode.\n")
fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-big5.txt\n")
fmt.Printf("var decode = [...]uint32{\n")
for i, v := range mapping {
if v != 0 {
fmt.Printf("\t%d: 0x%08X,\n", i, v)
}
}
fmt.Printf("}\n\n")

// Any run of at least separation continuous zero entries in the reverse map will
// be a separate encode table.
const separation = 1024

intervals := []interval(nil)
low, high := -1, -1
for i, v := range reverse {
if v == 0 {
continue
}
if low < 0 {
low = i
} else if i-high >= separation {
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
low = i
}
high = i + 1
}
if high >= 0 {
intervals = append(intervals, interval{low, high})
}
sort.Sort(byDecreasingLength(intervals))

fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
fmt.Printf("// encodeX are the encoding tables from Unicode to Big5 code,\n")
fmt.Printf("// sorted by decreasing length.\n")
for i, v := range intervals {
fmt.Printf("// encode%d: %5d entries for runes in [%6d, %6d).\n", i, v.len(), v.low, v.high)
}
fmt.Printf("\n")

for i, v := range intervals {
fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
fmt.Printf("var encode%d = [...]uint16{\n", i)
for j := v.low; j < v.high; j++ {
x := reverse[j]
if x == 0 {
continue
}
fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
}
fmt.Printf("}\n\n")
}
}

// interval is a half-open interval [low, high).
type interval struct {
low, high int
}

func (i interval) len() int { return i.high - i.low }

// byDecreasingLength sorts intervals by decreasing length.
type byDecreasingLength []interval

func (b byDecreasingLength) Len() int { return len(b) }
func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
@@ -1,64 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

// Language tag table generator.
// Data read from the web.

package main

import (
"flag"
"fmt"
"log"

"golang.org/x/text/internal/gen"
"golang.org/x/text/unicode/cldr"
)

var (
test = flag.Bool("test",
false,
"test existing tables; can be used to compare web data with package data.")
outputFile = flag.String("output",
"tables.go",
"output file for generated tables")
)

func main() {
gen.Init()

w := gen.NewCodeWriter()
defer w.WriteGoFile("tables.go", "compact")

fmt.Fprintln(w, `import "golang.org/x/text/internal/language"`)

b := newBuilder(w)
gen.WriteCLDRVersion(w)

b.writeCompactIndex()
}

type builder struct {
w *gen.CodeWriter
data *cldr.CLDR
supp *cldr.SupplementalData
}

func newBuilder(w *gen.CodeWriter) *builder {
r := gen.OpenCLDRCoreZip()
defer r.Close()
d := &cldr.Decoder{}
data, err := d.DecodeZip(r)
if err != nil {
log.Fatal(err)
}
b := builder{
w: w,
data: data,
supp: data.Supplemental(),
}
return &b
}
@@ -1,113 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

// This file generates derivative tables based on the language package itself.

import (
"fmt"
"log"
"sort"
"strings"

"golang.org/x/text/internal/language"
)

// Compact indices:
// Note -va-X variants only apply to localization variants.
// BCP variants only ever apply to language.
// The only ambiguity between tags is with regions.

func (b *builder) writeCompactIndex() {
// Collect all language tags for which we have any data in CLDR.
m := map[language.Tag]bool{}
for _, lang := range b.data.Locales() {
// We include all locales unconditionally to be consistent with en_US.
// We want en_US, even though it has no data associated with it.

// TODO: put any of the languages for which no data exists at the end
// of the index. This allows all components based on ICU to use that
// as the cutoff point.
// if x := data.RawLDML(lang); false ||
// x.LocaleDisplayNames != nil ||
// x.Characters != nil ||
// x.Delimiters != nil ||
// x.Measurement != nil ||
// x.Dates != nil ||
// x.Numbers != nil ||
// x.Units != nil ||
// x.ListPatterns != nil ||
// x.Collations != nil ||
// x.Segmentations != nil ||
// x.Rbnf != nil ||
// x.Annotations != nil ||
// x.Metadata != nil {

// TODO: support POSIX natively, albeit non-standard.
tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1))
m[tag] = true
// }
}

// TODO: plural rules are also defined for the deprecated tags:
// iw mo sh tl
// Consider removing these as compact tags.

// Include locales for plural rules, which uses a different structure.
for _, plurals := range b.supp.Plurals {
for _, rules := range plurals.PluralRules {
for _, lang := range strings.Split(rules.Locales, " ") {
m[language.Make(lang)] = true
}
}
}

var coreTags []language.CompactCoreInfo
var special []string

for t := range m {
if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" {
log.Fatalf("Unexpected extension %v in %v", x, t)
}
if len(t.Variants()) == 0 && len(t.Extensions()) == 0 {
cci, ok := language.GetCompactCore(t)
if !ok {
log.Fatalf("Locale for non-basic language %q", t)
}
coreTags = append(coreTags, cci)
} else {
special = append(special, t.String())
}
}

w := b.w

sort.Slice(coreTags, func(i, j int) bool { return coreTags[i] < coreTags[j] })
sort.Strings(special)

w.WriteComment(`
NumCompactTags is the number of common tags. The maximum tag is
NumCompactTags-1.`)
w.WriteConst("NumCompactTags", len(m))

fmt.Fprintln(w, "const (")
for i, t := range coreTags {
fmt.Fprintf(w, "%s ID = %d\n", ident(t.Tag().String()), i)
}
for i, t := range special {
fmt.Fprintf(w, "%s ID = %d\n", ident(t), i+len(coreTags))
}
fmt.Fprintln(w, ")")

w.WriteVar("coreTags", coreTags)

w.WriteConst("specialTagsStr", strings.Join(special, " "))
}

func ident(s string) string {
return strings.Replace(s, "-", "", -1) + "Index"
}
@@ -1,54 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

import (
"log"

"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/language"
"golang.org/x/text/internal/language/compact"
"golang.org/x/text/unicode/cldr"
)

func main() {
r := gen.OpenCLDRCoreZip()
defer r.Close()

d := &cldr.Decoder{}
data, err := d.DecodeZip(r)
if err != nil {
log.Fatalf("DecodeZip: %v", err)
}

w := gen.NewCodeWriter()
defer w.WriteGoFile("parents.go", "compact")

// Create parents table.
type ID uint16
parents := make([]ID, compact.NumCompactTags)
for _, loc := range data.Locales() {
tag := language.MustParse(loc)
index, ok := compact.FromTag(tag)
if !ok {
continue
}
parentIndex := compact.ID(0) // und
for p := tag.Parent(); p != language.Und; p = p.Parent() {
if x, ok := compact.FromTag(p); ok {
parentIndex = x
break
}
}
parents[index] = ID(parentIndex)
}

w.WriteComment(`
parents maps a compact index of a tag to the compact index of the parent of
this tag.`)
w.WriteVar("parents", parents)
}
File diff suppressed because it is too large
Load Diff
@@ -1,20 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

// This file contains code common to the maketables.go and the package code.

// AliasType is the type of an alias in AliasMap.
type AliasType int8

const (
Deprecated AliasType = iota
Macro
Legacy

AliasTypeUnknown AliasType = -1
)
@@ -1,305 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

// Language tag table generator.
// Data read from the web.

package main

import (
"flag"
"fmt"
"io"
"log"
"sort"
"strconv"
"strings"

"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/language"
"golang.org/x/text/unicode/cldr"
)

var (
test = flag.Bool("test",
false,
"test existing tables; can be used to compare web data with package data.")
outputFile = flag.String("output",
"tables.go",
"output file for generated tables")
)

func main() {
gen.Init()

w := gen.NewCodeWriter()
defer w.WriteGoFile("tables.go", "language")

b := newBuilder(w)
gen.WriteCLDRVersion(w)

b.writeConstants()
b.writeMatchData()
}

type builder struct {
w *gen.CodeWriter
hw io.Writer // MultiWriter for w and w.Hash
data *cldr.CLDR
supp *cldr.SupplementalData
}

func (b *builder) langIndex(s string) uint16 {
return uint16(language.MustParseBase(s))
}

func (b *builder) regionIndex(s string) int {
return int(language.MustParseRegion(s))
}

func (b *builder) scriptIndex(s string) int {
return int(language.MustParseScript(s))
}

func newBuilder(w *gen.CodeWriter) *builder {
r := gen.OpenCLDRCoreZip()
defer r.Close()
d := &cldr.Decoder{}
data, err := d.DecodeZip(r)
if err != nil {
log.Fatal(err)
}
b := builder{
w: w,
hw: io.MultiWriter(w, w.Hash),
data: data,
supp: data.Supplemental(),
}
return &b
}

// writeConsts computes f(v) for all v in values and writes the results
// as constants named _v to a single constant block.
func (b *builder) writeConsts(f func(string) int, values ...string) {
fmt.Fprintln(b.w, "const (")
for _, v := range values {
fmt.Fprintf(b.w, "\t_%s = %v\n", v, f(v))
}
fmt.Fprintln(b.w, ")")
}

// TODO: region inclusion data will probably not be use used in future matchers.

var langConsts = []string{
"de", "en", "fr", "it", "mo", "no", "nb", "pt", "sh", "mul", "und",
}

var scriptConsts = []string{
"Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy",
"Zzzz",
}

var regionConsts = []string{
"001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US",
"ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo.
}

func (b *builder) writeConstants() {
b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...)
b.writeConsts(b.regionIndex, regionConsts...)
b.writeConsts(b.scriptIndex, scriptConsts...)
}

type mutualIntelligibility struct {
want, have uint16
distance uint8
oneway bool
}

type scriptIntelligibility struct {
wantLang, haveLang uint16
wantScript, haveScript uint8
distance uint8
// Always oneway
}

type regionIntelligibility struct {
lang uint16 // compact language id
script uint8 // 0 means any
group uint8 // 0 means any; if bit 7 is set it means inverse
distance uint8
// Always twoway.
}

// writeMatchData writes tables with languages and scripts for which there is
// mutual intelligibility. The data is based on CLDR's languageMatching data.
// Note that we use a different algorithm than the one defined by CLDR and that
// we slightly modify the data. For example, we convert scores to confidence levels.
// We also drop all region-related data as we use a different algorithm to
// determine region equivalence.
func (b *builder) writeMatchData() {
lm := b.supp.LanguageMatching.LanguageMatches
cldr.MakeSlice(&lm).SelectAnyOf("type", "written_new")

regionHierarchy := map[string][]string{}
for _, g := range b.supp.TerritoryContainment.Group {
regions := strings.Split(g.Contains, " ")
regionHierarchy[g.Type] = append(regionHierarchy[g.Type], regions...)
}
regionToGroups := make([]uint8, language.NumRegions)

idToIndex := map[string]uint8{}
for i, mv := range lm[0].MatchVariable {
if i > 6 {
log.Fatalf("Too many groups: %d", i)
}
idToIndex[mv.Id] = uint8(i + 1)
// TODO: also handle '-'
for _, r := range strings.Split(mv.Value, "+") {
todo := []string{r}
for k := 0; k < len(todo); k++ {
r := todo[k]
regionToGroups[b.regionIndex(r)] |= 1 << uint8(i)
todo = append(todo, regionHierarchy[r]...)
}
}
}
b.w.WriteVar("regionToGroups", regionToGroups)

// maps language id to in- and out-of-group region.
paradigmLocales := [][3]uint16{}
locales := strings.Split(lm[0].ParadigmLocales[0].Locales, " ")
for i := 0; i < len(locales); i += 2 {
x := [3]uint16{}
for j := 0; j < 2; j++ {
pc := strings.SplitN(locales[i+j], "-", 2)
x[0] = b.langIndex(pc[0])
if len(pc) == 2 {
x[1+j] = uint16(b.regionIndex(pc[1]))
}
}
paradigmLocales = append(paradigmLocales, x)
}
b.w.WriteVar("paradigmLocales", paradigmLocales)

b.w.WriteType(mutualIntelligibility{})
b.w.WriteType(scriptIntelligibility{})
b.w.WriteType(regionIntelligibility{})

matchLang := []mutualIntelligibility{}
matchScript := []scriptIntelligibility{}
matchRegion := []regionIntelligibility{}
// Convert the languageMatch entries in lists keyed by desired language.
for _, m := range lm[0].LanguageMatch {
// Different versions of CLDR use different separators.
desired := strings.Replace(m.Desired, "-", "_", -1)
supported := strings.Replace(m.Supported, "-", "_", -1)
d := strings.Split(desired, "_")
s := strings.Split(supported, "_")
if len(d) != len(s) {
log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
continue
}
distance, _ := strconv.ParseInt(m.Distance, 10, 8)
switch len(d) {
case 2:
if desired == supported && desired == "*_*" {
continue
}
||||||
// language-script pair.
|
|
||||||
matchScript = append(matchScript, scriptIntelligibility{
|
|
||||||
wantLang: uint16(b.langIndex(d[0])),
|
|
||||||
haveLang: uint16(b.langIndex(s[0])),
|
|
||||||
wantScript: uint8(b.scriptIndex(d[1])),
|
|
||||||
haveScript: uint8(b.scriptIndex(s[1])),
|
|
||||||
distance: uint8(distance),
|
|
||||||
})
|
|
||||||
if m.Oneway != "true" {
|
|
||||||
matchScript = append(matchScript, scriptIntelligibility{
|
|
||||||
wantLang: uint16(b.langIndex(s[0])),
|
|
||||||
haveLang: uint16(b.langIndex(d[0])),
|
|
||||||
wantScript: uint8(b.scriptIndex(s[1])),
|
|
||||||
haveScript: uint8(b.scriptIndex(d[1])),
|
|
||||||
distance: uint8(distance),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
case 1:
|
|
||||||
if desired == supported && desired == "*" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if distance == 1 {
|
|
||||||
// nb == no is already handled by macro mapping. Check there
|
|
||||||
// really is only this case.
|
|
||||||
if d[0] != "no" || s[0] != "nb" {
|
|
||||||
log.Fatalf("unhandled equivalence %s == %s", s[0], d[0])
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// TODO: consider dropping oneway field and just doubling the entry.
|
|
||||||
matchLang = append(matchLang, mutualIntelligibility{
|
|
||||||
want: uint16(b.langIndex(d[0])),
|
|
||||||
have: uint16(b.langIndex(s[0])),
|
|
||||||
distance: uint8(distance),
|
|
||||||
oneway: m.Oneway == "true",
|
|
||||||
})
|
|
||||||
case 3:
|
|
||||||
if desired == supported && desired == "*_*_*" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if desired != supported {
|
|
||||||
// This is now supported by CLDR, but only one case, which
|
|
||||||
// should already be covered by paradigm locales. For instance,
|
|
||||||
// test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in
|
|
||||||
// testdata/CLDRLocaleMatcherTest.txt tests this.
|
|
||||||
if supported != "en_*_GB" {
|
|
||||||
log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ri := regionIntelligibility{
|
|
||||||
lang: b.langIndex(d[0]),
|
|
||||||
distance: uint8(distance),
|
|
||||||
}
|
|
||||||
if d[1] != "*" {
|
|
||||||
ri.script = uint8(b.scriptIndex(d[1]))
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case d[2] == "*":
|
|
||||||
ri.group = 0x80 // not contained in anything
|
|
||||||
case strings.HasPrefix(d[2], "$!"):
|
|
||||||
ri.group = 0x80
|
|
||||||
d[2] = "$" + d[2][len("$!"):]
|
|
||||||
fallthrough
|
|
||||||
case strings.HasPrefix(d[2], "$"):
|
|
||||||
ri.group |= idToIndex[d[2]]
|
|
||||||
}
|
|
||||||
matchRegion = append(matchRegion, ri)
|
|
||||||
default:
|
|
||||||
log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.SliceStable(matchLang, func(i, j int) bool {
|
|
||||||
return matchLang[i].distance < matchLang[j].distance
|
|
||||||
})
|
|
||||||
b.w.WriteComment(`
|
|
||||||
matchLang holds pairs of langIDs of base languages that are typically
|
|
||||||
mutually intelligible. Each pair is associated with a confidence and
|
|
||||||
whether the intelligibility goes one or both ways.`)
|
|
||||||
b.w.WriteVar("matchLang", matchLang)
|
|
||||||
|
|
||||||
b.w.WriteComment(`
|
|
||||||
matchScript holds pairs of scriptIDs where readers of one script
|
|
||||||
can typically also read the other. Each is associated with a confidence.`)
|
|
||||||
sort.SliceStable(matchScript, func(i, j int) bool {
|
|
||||||
return matchScript[i].distance < matchScript[j].distance
|
|
||||||
})
|
|
||||||
b.w.WriteVar("matchScript", matchScript)
|
|
||||||
|
|
||||||
sort.SliceStable(matchRegion, func(i, j int) bool {
|
|
||||||
return matchRegion[i].distance < matchRegion[j].distance
|
|
||||||
})
|
|
||||||
b.w.WriteVar("matchRegion", matchRegion)
|
|
||||||
}
|
|
|
@@ -1,133 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

import (
	"flag"
	"log"

	"golang.org/x/text/internal/gen"
	"golang.org/x/text/internal/triegen"
	"golang.org/x/text/internal/ucd"
)

var outputFile = flag.String("out", "tables.go", "output file")

func main() {
	gen.Init()
	gen.Repackage("gen_trieval.go", "trieval.go", "bidi")
	gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi")

	genTables()
}

// bidiClass names and codes taken from class "bc" in
// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt
var bidiClass = map[string]Class{
	"AL":  AL,  // ArabicLetter
	"AN":  AN,  // ArabicNumber
	"B":   B,   // ParagraphSeparator
	"BN":  BN,  // BoundaryNeutral
	"CS":  CS,  // CommonSeparator
	"EN":  EN,  // EuropeanNumber
	"ES":  ES,  // EuropeanSeparator
	"ET":  ET,  // EuropeanTerminator
	"L":   L,   // LeftToRight
	"NSM": NSM, // NonspacingMark
	"ON":  ON,  // OtherNeutral
	"R":   R,   // RightToLeft
	"S":   S,   // SegmentSeparator
	"WS":  WS,  // WhiteSpace

	"FSI": Control,
	"PDF": Control,
	"PDI": Control,
	"LRE": Control,
	"LRI": Control,
	"LRO": Control,
	"RLE": Control,
	"RLI": Control,
	"RLO": Control,
}

func genTables() {
	if numClass > 0x0F {
		log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass)
	}
	w := gen.NewCodeWriter()
	defer w.WriteVersionedGoFile(*outputFile, "bidi")

	gen.WriteUnicodeVersion(w)

	t := triegen.NewTrie("bidi")

	// Build data about bracket mapping. These bits need to be or-ed with
	// any other bits.
	orMask := map[rune]uint64{}

	xorMap := map[rune]int{}
	xorMasks := []rune{0} // First value is no-op.

	ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) {
		r1 := p.Rune(0)
		r2 := p.Rune(1)
		xor := r1 ^ r2
		if _, ok := xorMap[xor]; !ok {
			xorMap[xor] = len(xorMasks)
			xorMasks = append(xorMasks, xor)
		}
		entry := uint64(xorMap[xor]) << xorMaskShift
		switch p.String(2) {
		case "o":
			entry |= openMask
		case "c", "n":
		default:
			log.Fatalf("Unknown bracket class %q.", p.String(2))
		}
		orMask[r1] = entry
	})

	w.WriteComment(`
	xorMasks contains masks to be xor-ed with brackets to get the reverse
	version.`)
	w.WriteVar("xorMasks", xorMasks)

	done := map[rune]bool{}

	insert := func(r rune, c Class) {
		if !done[r] {
			t.Insert(r, orMask[r]|uint64(c))
			done[r] = true
		}
	}

	// Insert the derived BiDi properties.
	ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) {
		r := p.Rune(0)
		class, ok := bidiClass[p.String(1)]
		if !ok {
			log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1))
		}
		insert(r, class)
	})
	visitDefaults(insert)

	// TODO: use sparse blocks. This would reduce table size considerably
	// from the looks of it.

	sz, err := t.Gen(w)
	if err != nil {
		log.Fatal(err)
	}
	w.Size += sz
}

// dummy values to make methods in gen_common compile. The real versions
// will be generated by this file to tables.go.
var (
	xorMasks []rune
)
@@ -1,57 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

import (
	"unicode"

	"golang.org/x/text/internal/gen"
	"golang.org/x/text/internal/ucd"
	"golang.org/x/text/unicode/rangetable"
)

// These tables are hand-extracted from:
// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt
func visitDefaults(fn func(r rune, c Class)) {
	// first write default values for ranges listed above.
	visitRunes(fn, AL, []rune{
		0x0600, 0x07BF, // Arabic
		0x08A0, 0x08FF, // Arabic Extended-A
		0xFB50, 0xFDCF, // Arabic Presentation Forms
		0xFDF0, 0xFDFF,
		0xFE70, 0xFEFF,
		0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols
	})
	visitRunes(fn, R, []rune{
		0x0590, 0x05FF, // Hebrew
		0x07C0, 0x089F, // Nko et al.
		0xFB1D, 0xFB4F,
		0x00010800, 0x00010FFF, // Cypriot Syllabary et. al.
		0x0001E800, 0x0001EDFF,
		0x0001EF00, 0x0001EFFF,
	})
	visitRunes(fn, ET, []rune{ // European Terminator
		0x20A0, 0x20Cf, // Currency symbols
	})
	rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) {
		fn(r, BN) // Boundary Neutral
	})
	ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) {
		if p.String(1) == "Default_Ignorable_Code_Point" {
			fn(p.Rune(0), BN) // Boundary Neutral
		}
	})
}

func visitRunes(fn func(r rune, c Class), c Class, runes []rune) {
	for i := 0; i < len(runes); i += 2 {
		lo, hi := runes[i], runes[i+1]
		for j := lo; j <= hi; j++ {
			fn(j, c)
		}
	}
}
@@ -1,64 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

// Class is the Unicode BiDi class. Each rune has a single class.
type Class uint

const (
	L       Class = iota // LeftToRight
	R                    // RightToLeft
	EN                   // EuropeanNumber
	ES                   // EuropeanSeparator
	ET                   // EuropeanTerminator
	AN                   // ArabicNumber
	CS                   // CommonSeparator
	B                    // ParagraphSeparator
	S                    // SegmentSeparator
	WS                   // WhiteSpace
	ON                   // OtherNeutral
	BN                   // BoundaryNeutral
	NSM                  // NonspacingMark
	AL                   // ArabicLetter
	Control              // Control LRO - PDI

	numClass

	LRO // LeftToRightOverride
	RLO // RightToLeftOverride
	LRE // LeftToRightEmbedding
	RLE // RightToLeftEmbedding
	PDF // PopDirectionalFormat
	LRI // LeftToRightIsolate
	RLI // RightToLeftIsolate
	FSI // FirstStrongIsolate
	PDI // PopDirectionalIsolate

	unknownClass = ^Class(0)
)

var controlToClass = map[rune]Class{
	0x202D: LRO, // LeftToRightOverride,
	0x202E: RLO, // RightToLeftOverride,
	0x202A: LRE, // LeftToRightEmbedding,
	0x202B: RLE, // RightToLeftEmbedding,
	0x202C: PDF, // PopDirectionalFormat,
	0x2066: LRI, // LeftToRightIsolate,
	0x2067: RLI, // RightToLeftIsolate,
	0x2068: FSI, // FirstStrongIsolate,
	0x2069: PDI, // PopDirectionalIsolate,
}

// A trie entry has the following bits:
// 7..5  XOR mask for brackets
// 4     1: Bracket open, 0: Bracket close
// 3..0  Class type

const (
	openMask     = 0x10
	xorMaskShift = 5
)
@@ -1,986 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
// Normalization table generator.
|
|
||||||
// Data read from the web.
|
|
||||||
// See forminfo.go for a description of the trie values associated with each rune.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/binary"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/text/internal/gen"
|
|
||||||
"golang.org/x/text/internal/triegen"
|
|
||||||
"golang.org/x/text/internal/ucd"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
gen.Init()
|
|
||||||
loadUnicodeData()
|
|
||||||
compactCCC()
|
|
||||||
loadCompositionExclusions()
|
|
||||||
completeCharFields(FCanonical)
|
|
||||||
completeCharFields(FCompatibility)
|
|
||||||
computeNonStarterCounts()
|
|
||||||
verifyComputed()
|
|
||||||
printChars()
|
|
||||||
testDerived()
|
|
||||||
printTestdata()
|
|
||||||
makeTables()
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
tablelist = flag.String("tables",
|
|
||||||
"all",
|
|
||||||
"comma-separated list of which tables to generate; "+
|
|
||||||
"can be 'decomp', 'recomp', 'info' and 'all'")
|
|
||||||
test = flag.Bool("test",
|
|
||||||
false,
|
|
||||||
"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
|
|
||||||
verbose = flag.Bool("verbose",
|
|
||||||
false,
|
|
||||||
"write data to stdout as it is parsed")
|
|
||||||
)
|
|
||||||
|
|
||||||
const MaxChar = 0x10FFFF // anything above this shouldn't exist
|
|
||||||
|
|
||||||
// Quick Check properties of runes allow us to quickly
|
|
||||||
// determine whether a rune may occur in a normal form.
|
|
||||||
// For a given normal form, a rune may be guaranteed to occur
|
|
||||||
// verbatim (QC=Yes), may or may not combine with another
|
|
||||||
// rune (QC=Maybe), or may not occur (QC=No).
|
|
||||||
type QCResult int
|
|
||||||
|
|
||||||
const (
|
|
||||||
QCUnknown QCResult = iota
|
|
||||||
QCYes
|
|
||||||
QCNo
|
|
||||||
QCMaybe
|
|
||||||
)
|
|
||||||
|
|
||||||
func (r QCResult) String() string {
|
|
||||||
switch r {
|
|
||||||
case QCYes:
|
|
||||||
return "Yes"
|
|
||||||
case QCNo:
|
|
||||||
return "No"
|
|
||||||
case QCMaybe:
|
|
||||||
return "Maybe"
|
|
||||||
}
|
|
||||||
return "***UNKNOWN***"
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
FCanonical = iota // NFC or NFD
|
|
||||||
FCompatibility // NFKC or NFKD
|
|
||||||
FNumberOfFormTypes
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
MComposed = iota // NFC or NFKC
|
|
||||||
MDecomposed // NFD or NFKD
|
|
||||||
MNumberOfModes
|
|
||||||
)
|
|
||||||
|
|
||||||
// This contains only the properties we're interested in.
|
|
||||||
type Char struct {
|
|
||||||
name string
|
|
||||||
codePoint rune // if zero, this index is not a valid code point.
|
|
||||||
ccc uint8 // canonical combining class
|
|
||||||
origCCC uint8
|
|
||||||
excludeInComp bool // from CompositionExclusions.txt
|
|
||||||
compatDecomp bool // it has a compatibility expansion
|
|
||||||
|
|
||||||
nTrailingNonStarters uint8
|
|
||||||
nLeadingNonStarters uint8 // must be equal to trailing if non-zero
|
|
||||||
|
|
||||||
forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility
|
|
||||||
|
|
||||||
state State
|
|
||||||
}
|
|
||||||
|
|
||||||
var chars = make([]Char, MaxChar+1)
|
|
||||||
var cccMap = make(map[uint8]uint8)
|
|
||||||
|
|
||||||
func (c Char) String() string {
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
|
|
||||||
fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name)
|
|
||||||
fmt.Fprintf(buf, " ccc: %v\n", c.ccc)
|
|
||||||
fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp)
|
|
||||||
fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp)
|
|
||||||
fmt.Fprintf(buf, " state: %v\n", c.state)
|
|
||||||
fmt.Fprintf(buf, " NFC:\n")
|
|
||||||
fmt.Fprint(buf, c.forms[FCanonical])
|
|
||||||
fmt.Fprintf(buf, " NFKC:\n")
|
|
||||||
fmt.Fprint(buf, c.forms[FCompatibility])
|
|
||||||
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// In UnicodeData.txt, some ranges are marked like this:
|
|
||||||
// 3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;;
|
|
||||||
// 4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;;
|
|
||||||
// parseCharacter keeps a state variable indicating the weirdness.
|
|
||||||
type State int
|
|
||||||
|
|
||||||
const (
|
|
||||||
SNormal State = iota // known to be zero for the type
|
|
||||||
SFirst
|
|
||||||
SLast
|
|
||||||
SMissing
|
|
||||||
)
|
|
||||||
|
|
||||||
var lastChar = rune('\u0000')
|
|
||||||
|
|
||||||
func (c Char) isValid() bool {
|
|
||||||
return c.codePoint != 0 && c.state != SMissing
|
|
||||||
}
|
|
||||||
|
|
||||||
type FormInfo struct {
|
|
||||||
quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed
|
|
||||||
verified [MNumberOfModes]bool // index: MComposed or MDecomposed
|
|
||||||
|
|
||||||
combinesForward bool // May combine with rune on the right
|
|
||||||
combinesBackward bool // May combine with rune on the left
|
|
||||||
isOneWay bool // Never appears in result
|
|
||||||
inDecomp bool // Some decompositions result in this char.
|
|
||||||
decomp Decomposition
|
|
||||||
expandedDecomp Decomposition
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f FormInfo) String() string {
|
|
||||||
buf := bytes.NewBuffer(make([]byte, 0))
|
|
||||||
|
|
||||||
fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed])
|
|
||||||
fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed])
|
|
||||||
fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward)
|
|
||||||
fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward)
|
|
||||||
fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay)
|
|
||||||
fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp)
|
|
||||||
fmt.Fprintf(buf, " decomposition: %X\n", f.decomp)
|
|
||||||
fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp)
|
|
||||||
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
type Decomposition []rune
|
|
||||||
|
|
||||||
func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
|
|
||||||
decomp := strings.Split(s, " ")
|
|
||||||
if len(decomp) > 0 && skipfirst {
|
|
||||||
decomp = decomp[1:]
|
|
||||||
}
|
|
||||||
for _, d := range decomp {
|
|
||||||
point, err := strconv.ParseUint(d, 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
return a, err
|
|
||||||
}
|
|
||||||
a = append(a, rune(point))
|
|
||||||
}
|
|
||||||
return a, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadUnicodeData() {
|
|
||||||
f := gen.OpenUCDFile("UnicodeData.txt")
|
|
||||||
defer f.Close()
|
|
||||||
p := ucd.New(f)
|
|
||||||
for p.Next() {
|
|
||||||
r := p.Rune(ucd.CodePoint)
|
|
||||||
char := &chars[r]
|
|
||||||
|
|
||||||
char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
|
|
||||||
decmap := p.String(ucd.DecompMapping)
|
|
||||||
|
|
||||||
exp, err := parseDecomposition(decmap, false)
|
|
||||||
isCompat := false
|
|
||||||
if err != nil {
|
|
||||||
if len(decmap) > 0 {
|
|
||||||
exp, err = parseDecomposition(decmap, true)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
|
|
||||||
}
|
|
||||||
isCompat = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
char.name = p.String(ucd.Name)
|
|
||||||
char.codePoint = r
|
|
||||||
char.forms[FCompatibility].decomp = exp
|
|
||||||
if !isCompat {
|
|
||||||
char.forms[FCanonical].decomp = exp
|
|
||||||
} else {
|
|
||||||
char.compatDecomp = true
|
|
||||||
}
|
|
||||||
if len(decmap) > 0 {
|
|
||||||
char.forms[FCompatibility].decomp = exp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := p.Err(); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// compactCCC converts the sparse set of CCC values to a continguous one,
|
|
||||||
// reducing the number of bits needed from 8 to 6.
|
|
||||||
func compactCCC() {
|
|
||||||
m := make(map[uint8]uint8)
|
|
||||||
for i := range chars {
|
|
||||||
c := &chars[i]
|
|
||||||
m[c.ccc] = 0
|
|
||||||
}
|
|
||||||
cccs := []int{}
|
|
||||||
for v, _ := range m {
|
|
||||||
cccs = append(cccs, int(v))
|
|
||||||
}
|
|
||||||
sort.Ints(cccs)
|
|
||||||
for i, c := range cccs {
|
|
||||||
cccMap[uint8(i)] = uint8(c)
|
|
||||||
m[uint8(c)] = uint8(i)
|
|
||||||
}
|
|
||||||
for i := range chars {
|
|
||||||
c := &chars[i]
|
|
||||||
c.origCCC = c.ccc
|
|
||||||
c.ccc = m[c.ccc]
|
|
||||||
}
|
|
||||||
if len(m) >= 1<<6 {
|
|
||||||
log.Fatalf("too many difference CCC values: %d >= 64", len(m))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CompositionExclusions.txt has form:
|
|
||||||
// 0958 # ...
|
|
||||||
// See https://unicode.org/reports/tr44/ for full explanation
|
|
||||||
func loadCompositionExclusions() {
|
|
||||||
f := gen.OpenUCDFile("CompositionExclusions.txt")
|
|
||||||
defer f.Close()
|
|
||||||
p := ucd.New(f)
|
|
||||||
for p.Next() {
|
|
||||||
c := &chars[p.Rune(0)]
|
|
||||||
if c.excludeInComp {
|
|
||||||
log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
|
|
||||||
}
|
|
||||||
c.excludeInComp = true
|
|
||||||
}
|
|
||||||
if e := p.Err(); e != nil {
|
|
||||||
log.Fatal(e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// hasCompatDecomp returns true if any of the recursive
|
|
||||||
// decompositions contains a compatibility expansion.
|
|
||||||
// In this case, the character may not occur in NFK*.
|
|
||||||
func hasCompatDecomp(r rune) bool {
|
|
||||||
c := &chars[r]
|
|
||||||
if c.compatDecomp {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
for _, d := range c.forms[FCompatibility].decomp {
|
|
||||||
if hasCompatDecomp(d) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hangul related constants.
|
|
||||||
const (
|
|
||||||
HangulBase = 0xAC00
|
|
||||||
HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28)
|
|
||||||
|
|
||||||
JamoLBase = 0x1100
|
|
||||||
JamoLEnd = 0x1113
|
|
||||||
JamoVBase = 0x1161
|
|
||||||
JamoVEnd = 0x1176
|
|
||||||
JamoTBase = 0x11A8
|
|
||||||
JamoTEnd = 0x11C3
|
|
||||||
|
|
||||||
JamoLVTCount = 19 * 21 * 28
|
|
||||||
JamoTCount = 28
|
|
||||||
)
|
|
||||||
|
|
||||||
func isHangul(r rune) bool {
|
|
||||||
return HangulBase <= r && r < HangulEnd
|
|
||||||
}
|
|
||||||
|
|
||||||
func isHangulWithoutJamoT(r rune) bool {
|
|
||||||
if !isHangul(r) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
r -= HangulBase
|
|
||||||
return r < JamoLVTCount && r%JamoTCount == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func ccc(r rune) uint8 {
|
|
||||||
return chars[r].ccc
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert a rune in a buffer, ordered by Canonical Combining Class.
|
|
||||||
func insertOrdered(b Decomposition, r rune) Decomposition {
|
|
||||||
n := len(b)
|
|
||||||
b = append(b, 0)
|
|
||||||
cc := ccc(r)
|
|
||||||
if cc > 0 {
|
|
||||||
// Use bubble sort.
|
|
||||||
for ; n > 0; n-- {
|
|
||||||
if ccc(b[n-1]) <= cc {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
b[n] = b[n-1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
b[n] = r
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recursively decompose.
|
|
||||||
func decomposeRecursive(form int, r rune, d Decomposition) Decomposition {
|
|
||||||
dcomp := chars[r].forms[form].decomp
|
|
||||||
if len(dcomp) == 0 {
|
|
||||||
return insertOrdered(d, r)
|
|
||||||
}
|
|
||||||
for _, c := range dcomp {
|
|
||||||
d = decomposeRecursive(form, c, d)
|
|
||||||
}
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
func completeCharFields(form int) {
|
|
||||||
// Phase 0: pre-expand decomposition.
|
|
||||||
for i := range chars {
|
|
||||||
f := &chars[i].forms[form]
|
|
||||||
if len(f.decomp) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
exp := make(Decomposition, 0)
|
|
||||||
for _, c := range f.decomp {
|
|
||||||
exp = decomposeRecursive(form, c, exp)
|
|
||||||
}
|
|
||||||
f.expandedDecomp = exp
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 1: composition exclusion, mark decomposition.
|
|
||||||
for i := range chars {
|
|
||||||
c := &chars[i]
|
|
||||||
f := &c.forms[form]
|
|
||||||
|
|
||||||
// Marks script-specific exclusions and version restricted.
|
|
||||||
f.isOneWay = c.excludeInComp
|
|
||||||
|
|
||||||
// Singletons
|
|
||||||
f.isOneWay = f.isOneWay || len(f.decomp) == 1
|
|
||||||
|
|
||||||
// Non-starter decompositions
|
|
||||||
if len(f.decomp) > 1 {
|
|
||||||
chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0
|
|
||||||
f.isOneWay = f.isOneWay || chk
|
|
||||||
}
|
|
||||||
|
|
||||||
// Runes that decompose into more than two runes.
|
|
||||||
f.isOneWay = f.isOneWay || len(f.decomp) > 2
|
|
||||||
|
|
||||||
if form == FCompatibility {
|
|
||||||
f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, r := range f.decomp {
|
|
||||||
chars[r].forms[form].inDecomp = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 2: forward and backward combining.
|
|
||||||
for i := range chars {
|
|
||||||
c := &chars[i]
|
|
||||||
f := &c.forms[form]
|
|
||||||
|
|
||||||
if !f.isOneWay && len(f.decomp) == 2 {
|
|
||||||
f0 := &chars[f.decomp[0]].forms[form]
|
|
||||||
f1 := &chars[f.decomp[1]].forms[form]
|
|
||||||
if !f0.isOneWay {
|
|
||||||
f0.combinesForward = true
|
|
||||||
}
|
|
||||||
if !f1.isOneWay {
|
|
||||||
f1.combinesBackward = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if isHangulWithoutJamoT(rune(i)) {
|
|
||||||
f.combinesForward = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 3: quick check values.
|
|
||||||
for i := range chars {
|
|
||||||
c := &chars[i]
|
|
||||||
f := &c.forms[form]
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case len(f.decomp) > 0:
|
|
||||||
f.quickCheck[MDecomposed] = QCNo
|
|
||||||
case isHangul(rune(i)):
|
|
||||||
f.quickCheck[MDecomposed] = QCNo
|
|
||||||
default:
|
|
||||||
f.quickCheck[MDecomposed] = QCYes
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case f.isOneWay:
|
|
||||||
f.quickCheck[MComposed] = QCNo
|
|
||||||
case (i & 0xffff00) == JamoLBase:
|
|
||||||
f.quickCheck[MComposed] = QCYes
|
|
||||||
if JamoLBase <= i && i < JamoLEnd {
|
|
||||||
f.combinesForward = true
|
|
||||||
}
|
|
||||||
if JamoVBase <= i && i < JamoVEnd {
|
|
||||||
f.quickCheck[MComposed] = QCMaybe
|
|
||||||
f.combinesBackward = true
|
|
||||||
f.combinesForward = true
|
|
||||||
}
|
|
||||||
if JamoTBase <= i && i < JamoTEnd {
|
|
||||||
f.quickCheck[MComposed] = QCMaybe
|
|
||||||
f.combinesBackward = true
|
|
||||||
}
|
|
||||||
case !f.combinesBackward:
|
|
||||||
f.quickCheck[MComposed] = QCYes
|
|
||||||
default:
|
|
||||||
f.quickCheck[MComposed] = QCMaybe
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func computeNonStarterCounts() {
|
|
||||||
// Phase 4: leading and trailing non-starter count
|
|
||||||
for i := range chars {
|
|
||||||
c := &chars[i]
|
|
||||||
|
|
||||||
runes := []rune{rune(i)}
|
|
||||||
// We always use FCompatibility so that the CGJ insertion points do not
|
|
||||||
// change for repeated normalizations with different forms.
|
|
||||||
if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 {
|
|
||||||
runes = exp
|
|
||||||
}
|
|
||||||
// We consider runes that combine backwards to be non-starters for the
|
|
||||||
// purpose of Stream-Safe Text Processing.
|
|
||||||
for _, r := range runes {
|
|
||||||
if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
c.nLeadingNonStarters++
|
|
||||||
}
|
|
||||||
for i := len(runes) - 1; i >= 0; i-- {
|
|
||||||
if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
c.nTrailingNonStarters++
|
|
||||||
}
|
|
||||||
if c.nTrailingNonStarters > 3 {
|
|
||||||
log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes)
|
|
||||||
}
|
|
||||||
|
|
||||||
if isHangul(rune(i)) {
|
|
||||||
c.nTrailingNonStarters = 2
|
|
||||||
if isHangulWithoutJamoT(rune(i)) {
|
|
||||||
c.nTrailingNonStarters = 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t {
|
|
||||||
log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t)
|
|
||||||
}
|
|
||||||
if t := c.nTrailingNonStarters; t > 3 {
|
|
||||||
log.Fatalf("%U: number of trailing non-starters is %d > 3", t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func printBytes(w io.Writer, b []byte, name string) {
|
|
||||||
fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
|
|
||||||
fmt.Fprintf(w, "var %s = [...]byte {", name)
|
|
||||||
for i, c := range b {
|
|
||||||
switch {
|
|
||||||
case i%64 == 0:
|
|
||||||
fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
|
|
||||||
case i%8 == 0:
|
|
||||||
fmt.Fprintf(w, "\n")
|
|
||||||
}
|
|
||||||
fmt.Fprintf(w, "0x%.2X, ", c)
|
|
||||||
}
|
|
||||||
fmt.Fprint(w, "\n}\n\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
// See forminfo.go for format.
|
|
||||||
func makeEntry(f *FormInfo, c *Char) uint16 {
|
|
||||||
e := uint16(0)
|
|
||||||
if r := c.codePoint; HangulBase <= r && r < HangulEnd {
|
|
||||||
e |= 0x40
|
|
||||||
}
|
|
||||||
if f.combinesForward {
|
|
||||||
e |= 0x20
|
|
||||||
}
|
|
||||||
if f.quickCheck[MDecomposed] == QCNo {
|
|
||||||
e |= 0x4
|
|
||||||
}
|
|
||||||
switch f.quickCheck[MComposed] {
|
|
||||||
case QCYes:
|
|
||||||
case QCNo:
|
|
||||||
e |= 0x10
|
|
||||||
case QCMaybe:
|
|
||||||
e |= 0x18
|
|
||||||
default:
|
|
||||||
log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed])
|
|
||||||
}
|
|
||||||
e |= uint16(c.nTrailingNonStarters)
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// decompSet keeps track of unique decompositions, grouped by whether
|
|
||||||
// the decomposition is followed by a trailing and/or leading CCC.
|
|
||||||
type decompSet [7]map[string]bool
|
|
||||||
|
|
||||||
const (
|
|
||||||
normalDecomp = iota
|
|
||||||
firstMulti
|
|
||||||
firstCCC
|
|
||||||
endMulti
|
|
||||||
firstLeadingCCC
|
|
||||||
firstCCCZeroExcept
|
|
||||||
firstStarterWithNLead
|
|
||||||
lastDecomp
|
|
||||||
)
|
|
||||||
|
|
||||||
var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"}
|
|
||||||
|
|
||||||
func makeDecompSet() decompSet {
|
|
||||||
m := decompSet{}
|
|
||||||
for i := range m {
|
|
||||||
m[i] = make(map[string]bool)
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
func (m *decompSet) insert(key int, s string) {
|
|
||||||
m[key][s] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func printCharInfoTables(w io.Writer) int {
|
|
||||||
mkstr := func(r rune, f *FormInfo) (int, string) {
|
|
||||||
d := f.expandedDecomp
|
|
||||||
s := string([]rune(d))
|
|
||||||
if max := 1 << 6; len(s) >= max {
|
|
||||||
const msg = "%U: too many bytes in decomposition: %d >= %d"
|
|
||||||
log.Fatalf(msg, r, len(s), max)
|
|
||||||
}
|
|
||||||
head := uint8(len(s))
|
|
||||||
if f.quickCheck[MComposed] != QCYes {
|
|
||||||
head |= 0x40
|
|
||||||
}
|
|
||||||
if f.combinesForward {
|
|
||||||
head |= 0x80
|
|
||||||
}
|
|
||||||
s = string([]byte{head}) + s
|
|
||||||
|
|
||||||
lccc := ccc(d[0])
|
|
||||||
tccc := ccc(d[len(d)-1])
|
|
||||||
cc := ccc(r)
|
|
||||||
if cc != 0 && lccc == 0 && tccc == 0 {
|
|
||||||
log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
|
|
||||||
}
|
|
||||||
if tccc < lccc && lccc != 0 {
|
|
||||||
const msg = "%U: lccc (%d) must be <= tcc (%d)"
|
|
||||||
log.Fatalf(msg, r, lccc, tccc)
|
|
||||||
}
|
|
||||||
index := normalDecomp
|
|
||||||
nTrail := chars[r].nTrailingNonStarters
|
|
||||||
nLead := chars[r].nLeadingNonStarters
|
|
||||||
if tccc > 0 || lccc > 0 || nTrail > 0 {
|
|
||||||
tccc <<= 2
|
|
||||||
tccc |= nTrail
|
|
||||||
s += string([]byte{tccc})
|
|
||||||
index = endMulti
|
|
||||||
for _, r := range d[1:] {
|
|
||||||
if ccc(r) == 0 {
|
|
||||||
index = firstCCC
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if lccc > 0 || nLead > 0 {
|
|
||||||
s += string([]byte{lccc})
|
|
||||||
if index == firstCCC {
|
|
||||||
log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
|
|
||||||
}
|
|
||||||
index = firstLeadingCCC
|
|
||||||
}
|
|
||||||
if cc != lccc {
|
|
||||||
if cc != 0 {
|
|
||||||
log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
|
|
||||||
}
|
|
||||||
index = firstCCCZeroExcept
|
|
||||||
}
|
|
||||||
} else if len(d) > 1 {
|
|
||||||
index = firstMulti
|
|
||||||
}
|
|
||||||
return index, s
|
|
||||||
}
|
|
||||||
|
|
||||||
decompSet := makeDecompSet()
|
|
||||||
const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail.
|
|
||||||
decompSet.insert(firstStarterWithNLead, nLeadStr)
|
|
||||||
|
|
||||||
// Store the uniqued decompositions in a byte buffer,
|
|
||||||
// preceded by their byte length.
|
|
||||||
for _, c := range chars {
|
|
||||||
for _, f := range c.forms {
|
|
||||||
if len(f.expandedDecomp) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if f.combinesBackward {
|
|
||||||
log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
|
|
||||||
}
|
|
||||||
index, s := mkstr(c.codePoint, &f)
|
|
||||||
decompSet.insert(index, s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
decompositions := bytes.NewBuffer(make([]byte, 0, 10000))
|
|
||||||
size := 0
|
|
||||||
positionMap := make(map[string]uint16)
|
|
||||||
decompositions.WriteString("\000")
|
|
||||||
fmt.Fprintln(w, "const (")
|
|
||||||
for i, m := range decompSet {
|
|
||||||
sa := []string{}
|
|
||||||
for s := range m {
|
|
||||||
sa = append(sa, s)
|
|
||||||
}
|
|
||||||
sort.Strings(sa)
|
|
||||||
for _, s := range sa {
|
|
||||||
p := decompositions.Len()
|
|
||||||
decompositions.WriteString(s)
|
|
||||||
positionMap[s] = uint16(p)
|
|
||||||
}
|
|
||||||
if cname[i] != "" {
|
|
||||||
fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Fprintln(w, "maxDecomp = 0x8000")
|
|
||||||
fmt.Fprintln(w, ")")
|
|
||||||
b := decompositions.Bytes()
|
|
||||||
printBytes(w, b, "decomps")
|
|
||||||
size += len(b)
|
|
||||||
|
|
||||||
varnames := []string{"nfc", "nfkc"}
|
|
||||||
for i := 0; i < FNumberOfFormTypes; i++ {
|
|
||||||
trie := triegen.NewTrie(varnames[i])
|
|
||||||
|
|
||||||
for r, c := range chars {
|
|
||||||
f := c.forms[i]
|
|
||||||
d := f.expandedDecomp
|
|
||||||
if len(d) != 0 {
|
|
||||||
_, key := mkstr(c.codePoint, &f)
|
|
||||||
trie.Insert(rune(r), uint64(positionMap[key]))
|
|
||||||
if c.ccc != ccc(d[0]) {
|
|
||||||
// We assume the lead ccc of a decomposition !=0 in this case.
|
|
||||||
if ccc(d[0]) == 0 {
|
|
||||||
log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
|
|
||||||
// Handle cases where it can't be detected that the nLead should be equal
|
|
||||||
// to nTrail.
|
|
||||||
trie.Insert(c.codePoint, uint64(positionMap[nLeadStr]))
|
|
||||||
} else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 {
|
|
||||||
trie.Insert(c.codePoint, uint64(0x8000|v))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
size += sz
|
|
||||||
}
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
|
|
||||||
func contains(sa []string, s string) bool {
|
|
||||||
for _, a := range sa {
|
|
||||||
if a == s {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeTables() {
|
|
||||||
w := &bytes.Buffer{}
|
|
||||||
|
|
||||||
size := 0
|
|
||||||
if *tablelist == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
list := strings.Split(*tablelist, ",")
|
|
||||||
if *tablelist == "all" {
|
|
||||||
list = []string{"recomp", "info"}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute maximum decomposition size.
|
|
||||||
max := 0
|
|
||||||
for _, c := range chars {
|
|
||||||
if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max {
|
|
||||||
max = n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Fprintln(w, `import "sync"`)
|
|
||||||
fmt.Fprintln(w)
|
|
||||||
|
|
||||||
fmt.Fprintln(w, "const (")
|
|
||||||
fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
|
|
||||||
fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
|
|
||||||
fmt.Fprintln(w)
|
|
||||||
fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
|
|
||||||
fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
|
|
||||||
fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
|
|
||||||
fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
|
|
||||||
fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
|
|
||||||
fmt.Fprintln(w, ")\n")
|
|
||||||
|
|
||||||
// Print the CCC remap table.
|
|
||||||
size += len(cccMap)
|
|
||||||
fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
|
|
||||||
for i := 0; i < len(cccMap); i++ {
|
|
||||||
if i%8 == 0 {
|
|
||||||
fmt.Fprintln(w)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
|
|
||||||
}
|
|
||||||
fmt.Fprintln(w, "\n}\n")
|
|
||||||
|
|
||||||
if contains(list, "info") {
|
|
||||||
size += printCharInfoTables(w)
|
|
||||||
}
|
|
||||||
|
|
||||||
if contains(list, "recomp") {
|
|
||||||
// Note that we use 32 bit keys, instead of 64 bit.
|
|
||||||
// This clips the bits of three entries, but we know
|
|
||||||
// this won't cause a collision. The compiler will catch
|
|
||||||
// any changes made to UnicodeData.txt that introduces
|
|
||||||
// a collision.
|
|
||||||
// Note that the recomposition map for NFC and NFKC
|
|
||||||
// are identical.
|
|
||||||
|
|
||||||
// Recomposition map
|
|
||||||
nrentries := 0
|
|
||||||
for _, c := range chars {
|
|
||||||
f := c.forms[FCanonical]
|
|
||||||
if !f.isOneWay && len(f.decomp) > 0 {
|
|
||||||
nrentries++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sz := nrentries * 8
|
|
||||||
size += sz
|
|
||||||
fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
|
|
||||||
fmt.Fprintln(w, "var recompMap map[uint32]rune")
|
|
||||||
fmt.Fprintln(w, "var recompMapOnce sync.Once\n")
|
|
||||||
fmt.Fprintln(w, `const recompMapPacked = "" +`)
|
|
||||||
var buf [8]byte
|
|
||||||
for i, c := range chars {
|
|
||||||
f := c.forms[FCanonical]
|
|
||||||
d := f.decomp
|
|
||||||
if !f.isOneWay && len(d) > 0 {
|
|
||||||
key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
|
|
||||||
binary.BigEndian.PutUint32(buf[:4], key)
|
|
||||||
binary.BigEndian.PutUint32(buf[4:], uint32(i))
|
|
||||||
fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// hack so we don't have to special case the trailing plus sign
|
|
||||||
fmt.Fprintf(w, ` ""`)
|
|
||||||
fmt.Fprintln(w)
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
|
|
||||||
gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes())
|
|
||||||
}
|
|
||||||
|
|
||||||
func printChars() {
|
|
||||||
if *verbose {
|
|
||||||
for _, c := range chars {
|
|
||||||
if !c.isValid() || c.state == SMissing {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fmt.Println(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// verifyComputed does various consistency tests.
|
|
||||||
func verifyComputed() {
|
|
||||||
for i, c := range chars {
|
|
||||||
for _, f := range c.forms {
|
|
||||||
isNo := (f.quickCheck[MDecomposed] == QCNo)
|
|
||||||
if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) {
|
|
||||||
log.Fatalf("%U: NF*D QC must be No if rune decomposes", i)
|
|
||||||
}
|
|
||||||
|
|
||||||
isMaybe := f.quickCheck[MComposed] == QCMaybe
|
|
||||||
if f.combinesBackward != isMaybe {
|
|
||||||
log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i)
|
|
||||||
}
|
|
||||||
if len(f.decomp) > 0 && f.combinesForward && isMaybe {
|
|
||||||
log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(f.expandedDecomp) != 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
|
|
||||||
// We accept these runes to be treated differently (it only affects
|
|
||||||
// segment breaking in iteration, most likely on improper use), but
|
|
||||||
// reconsider if more characters are added.
|
|
||||||
// U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;;
|
|
||||||
// U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;;
|
|
||||||
// U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
|
|
||||||
// U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
|
|
||||||
// U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
|
|
||||||
// U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;;
|
|
||||||
if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
|
|
||||||
log.Fatalf("%U: nLead was %v; want %v", i, a, b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
nfc := c.forms[FCanonical]
|
|
||||||
nfkc := c.forms[FCompatibility]
|
|
||||||
if nfc.combinesBackward != nfkc.combinesBackward {
|
|
||||||
log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use values in DerivedNormalizationProps.txt to compare against the
|
|
||||||
// values we computed.
|
|
||||||
// DerivedNormalizationProps.txt has form:
|
|
||||||
// 00C0..00C5 ; NFD_QC; N # ...
|
|
||||||
// 0374 ; NFD_QC; N # ...
|
|
||||||
// See https://unicode.org/reports/tr44/ for full explanation
|
|
||||||
func testDerived() {
|
|
||||||
f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
|
|
||||||
defer f.Close()
|
|
||||||
p := ucd.New(f)
|
|
||||||
for p.Next() {
|
|
||||||
r := p.Rune(0)
|
|
||||||
c := &chars[r]
|
|
||||||
|
|
||||||
var ftype, mode int
|
|
||||||
qt := p.String(1)
|
|
||||||
switch qt {
|
|
||||||
case "NFC_QC":
|
|
||||||
ftype, mode = FCanonical, MComposed
|
|
||||||
case "NFD_QC":
|
|
||||||
ftype, mode = FCanonical, MDecomposed
|
|
||||||
case "NFKC_QC":
|
|
||||||
ftype, mode = FCompatibility, MComposed
|
|
||||||
case "NFKD_QC":
|
|
||||||
ftype, mode = FCompatibility, MDecomposed
|
|
||||||
default:
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var qr QCResult
|
|
||||||
switch p.String(2) {
|
|
||||||
case "Y":
|
|
||||||
qr = QCYes
|
|
||||||
case "N":
|
|
||||||
qr = QCNo
|
|
||||||
case "M":
|
|
||||||
qr = QCMaybe
|
|
||||||
default:
|
|
||||||
log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
|
|
||||||
}
|
|
||||||
if got := c.forms[ftype].quickCheck[mode]; got != qr {
|
|
||||||
log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
|
|
||||||
}
|
|
||||||
c.forms[ftype].verified[mode] = true
|
|
||||||
}
|
|
||||||
if err := p.Err(); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
// Any unspecified value must be QCYes. Verify this.
|
|
||||||
for i, c := range chars {
|
|
||||||
for j, fd := range c.forms {
|
|
||||||
for k, qr := range fd.quickCheck {
|
|
||||||
if !fd.verified[k] && qr != QCYes {
|
|
||||||
m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
|
|
||||||
log.Printf(m, i, j, k, qr, c.name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var testHeader = `const (
|
|
||||||
Yes = iota
|
|
||||||
No
|
|
||||||
Maybe
|
|
||||||
)
|
|
||||||
|
|
||||||
type formData struct {
|
|
||||||
qc uint8
|
|
||||||
combinesForward bool
|
|
||||||
decomposition string
|
|
||||||
}
|
|
||||||
|
|
||||||
type runeData struct {
|
|
||||||
r rune
|
|
||||||
ccc uint8
|
|
||||||
nLead uint8
|
|
||||||
nTrail uint8
|
|
||||||
f [2]formData // 0: canonical; 1: compatibility
|
|
||||||
}
|
|
||||||
|
|
||||||
func f(qc uint8, cf bool, dec string) [2]formData {
|
|
||||||
return [2]formData{{qc, cf, dec}, {qc, cf, dec}}
|
|
||||||
}
|
|
||||||
|
|
||||||
func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData {
|
|
||||||
return [2]formData{{qc, cf, d}, {qck, cfk, dk}}
|
|
||||||
}
|
|
||||||
|
|
||||||
var testData = []runeData{
|
|
||||||
`
|
|
||||||
|
|
||||||
func printTestdata() {
|
|
||||||
type lastInfo struct {
|
|
||||||
ccc uint8
|
|
||||||
nLead uint8
|
|
||||||
nTrail uint8
|
|
||||||
f string
|
|
||||||
}
|
|
||||||
|
|
||||||
last := lastInfo{}
|
|
||||||
w := &bytes.Buffer{}
|
|
||||||
fmt.Fprintf(w, testHeader)
|
|
||||||
for r, c := range chars {
|
|
||||||
f := c.forms[FCanonical]
|
|
||||||
qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
|
|
||||||
f = c.forms[FCompatibility]
|
|
||||||
qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
|
|
||||||
s := ""
|
|
||||||
if d == dk && qc == qck && cf == cfk {
|
|
||||||
s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d)
|
|
||||||
} else {
|
|
||||||
s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk)
|
|
||||||
}
|
|
||||||
current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
|
|
||||||
if last != current {
|
|
||||||
fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
|
|
||||||
last = current
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Fprintln(w, "}")
|
|
||||||
gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes())
|
|
||||||
}
|
|
|
@@ -1,117 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
// Trie table generator.
|
|
||||||
// Used by make*tables tools to generate a go file with trie data structures
|
|
||||||
// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte
|
|
||||||
// sequence are used to lookup offsets in the index table to be used for the
|
|
||||||
// next byte. The last byte is used to index into a table with 16-bit values.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
const maxSparseEntries = 16
|
|
||||||
|
|
||||||
type normCompacter struct {
|
|
||||||
sparseBlocks [][]uint64
|
|
||||||
sparseOffset []uint16
|
|
||||||
sparseCount int
|
|
||||||
name string
|
|
||||||
}
|
|
||||||
|
|
||||||
func mostFrequentStride(a []uint64) int {
|
|
||||||
counts := make(map[int]int)
|
|
||||||
var v int
|
|
||||||
for _, x := range a {
|
|
||||||
if stride := int(x) - v; v != 0 && stride >= 0 {
|
|
||||||
counts[stride]++
|
|
||||||
}
|
|
||||||
v = int(x)
|
|
||||||
}
|
|
||||||
var maxs, maxc int
|
|
||||||
for stride, cnt := range counts {
|
|
||||||
if cnt > maxc || (cnt == maxc && stride < maxs) {
|
|
||||||
maxs, maxc = stride, cnt
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return maxs
|
|
||||||
}
|
|
||||||
|
|
||||||
func countSparseEntries(a []uint64) int {
|
|
||||||
stride := mostFrequentStride(a)
|
|
||||||
var v, count int
|
|
||||||
for _, tv := range a {
|
|
||||||
if int(tv)-v != stride {
|
|
||||||
if tv != 0 {
|
|
||||||
count++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
v = int(tv)
|
|
||||||
}
|
|
||||||
return count
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *normCompacter) Size(v []uint64) (sz int, ok bool) {
|
|
||||||
if n := countSparseEntries(v); n <= maxSparseEntries {
|
|
||||||
return (n+1)*4 + 2, true
|
|
||||||
}
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *normCompacter) Store(v []uint64) uint32 {
|
|
||||||
h := uint32(len(c.sparseOffset))
|
|
||||||
c.sparseBlocks = append(c.sparseBlocks, v)
|
|
||||||
c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount))
|
|
||||||
c.sparseCount += countSparseEntries(v) + 1
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *normCompacter) Handler() string {
|
|
||||||
return c.name + "Sparse.lookup"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *normCompacter) Print(w io.Writer) (retErr error) {
|
|
||||||
p := func(f string, x ...interface{}) {
|
|
||||||
if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil {
|
|
||||||
retErr = err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ls := len(c.sparseBlocks)
|
|
||||||
p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2)
|
|
||||||
p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset)
|
|
||||||
|
|
||||||
ns := c.sparseCount
|
|
||||||
p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4)
|
|
||||||
p("var %sSparseValues = [%d]valueRange {", c.name, ns)
|
|
||||||
for i, b := range c.sparseBlocks {
|
|
||||||
p("\n// Block %#x, offset %#x", i, c.sparseOffset[i])
|
|
||||||
var v int
|
|
||||||
stride := mostFrequentStride(b)
|
|
||||||
n := countSparseEntries(b)
|
|
||||||
p("\n{value:%#04x,lo:%#02x},", stride, uint8(n))
|
|
||||||
for i, nv := range b {
|
|
||||||
if int(nv)-v != stride {
|
|
||||||
if v != 0 {
|
|
||||||
p(",hi:%#02x},", 0x80+i-1)
|
|
||||||
}
|
|
||||||
if nv != 0 {
|
|
||||||
p("\n{value:%#04x,lo:%#02x", nv, 0x80+i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
v = int(nv)
|
|
||||||
}
|
|
||||||
if v != 0 {
|
|
||||||
p(",hi:%#02x},", 0x80+len(b)-1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
p("\n}\n\n")
|
|
||||||
return
|
|
||||||
}
|
|
|
vendor/modules.txt
@@ -1,45 +1,45 @@
 # cloud.google.com/go v0.45.1
-cloud.google.com/go/storage
+cloud.google.com/go/compute/metadata
 cloud.google.com/go/iam
 cloud.google.com/go/internal
 cloud.google.com/go/internal/optional
 cloud.google.com/go/internal/trace
 cloud.google.com/go/internal/version
-cloud.google.com/go/compute/metadata
+cloud.google.com/go/storage
 # github.com/Azure/azure-sdk-for-go v21.3.0+incompatible
 github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources
 github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage
-github.com/Azure/azure-sdk-for-go/storage
 github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources
 github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage
+github.com/Azure/azure-sdk-for-go/storage
 github.com/Azure/azure-sdk-for-go/version
 # github.com/Azure/go-autorest v10.15.4+incompatible
 github.com/Azure/go-autorest/autorest
 github.com/Azure/go-autorest/autorest/adal
 github.com/Azure/go-autorest/autorest/azure
-github.com/Azure/go-autorest/logger
-github.com/Azure/go-autorest/version
-github.com/Azure/go-autorest/autorest/date
 github.com/Azure/go-autorest/autorest/azure/cli
+github.com/Azure/go-autorest/autorest/date
 github.com/Azure/go-autorest/autorest/to
 github.com/Azure/go-autorest/autorest/validation
+github.com/Azure/go-autorest/logger
+github.com/Azure/go-autorest/version
 # github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4
 github.com/Azure/go-ntlmssp
 # github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022
 github.com/ChrisTrenkamp/goxpath
-github.com/ChrisTrenkamp/goxpath/tree
-github.com/ChrisTrenkamp/goxpath/tree/xmltree
 github.com/ChrisTrenkamp/goxpath/internal/execxp
-github.com/ChrisTrenkamp/goxpath/parser
-github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder
-github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlele
 github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil
 github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns
 github.com/ChrisTrenkamp/goxpath/internal/xsort
 github.com/ChrisTrenkamp/goxpath/lexer
+github.com/ChrisTrenkamp/goxpath/parser
 github.com/ChrisTrenkamp/goxpath/parser/pathexpr
-github.com/ChrisTrenkamp/goxpath/xconst
+github.com/ChrisTrenkamp/goxpath/tree
+github.com/ChrisTrenkamp/goxpath/tree/xmltree
+github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder
+github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlele
 github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlnode
+github.com/ChrisTrenkamp/goxpath/xconst
 # github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292
 github.com/Unknwon/com
 # github.com/agext/levenshtein v1.2.2
@@ -49,17 +49,17 @@ github.com/agl/ed25519
 github.com/agl/ed25519/edwards25519
 # github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a
 github.com/aliyun/alibaba-cloud-sdk-go/sdk
-github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials
-github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests
-github.com/aliyun/alibaba-cloud-sdk-go/services/location
-github.com/aliyun/alibaba-cloud-sdk-go/services/sts
 github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth
+github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials
 github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider
+github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers
 github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints
 github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors
+github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests
 github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses
 github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils
-github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers
+github.com/aliyun/alibaba-cloud-sdk-go/services/location
+github.com/aliyun/alibaba-cloud-sdk-go/services/sts
 # github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70
 github.com/aliyun/aliyun-oss-go-sdk/oss
 # github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible
@@ -80,48 +80,49 @@ github.com/apparentlymart/go-textseg/textseg
 github.com/armon/circbuf
 # github.com/armon/go-radix v1.0.0
 github.com/armon/go-radix
-# github.com/aws/aws-sdk-go v1.22.0
+# github.com/aws/aws-sdk-go v1.25.3
 github.com/aws/aws-sdk-go/aws
+github.com/aws/aws-sdk-go/aws/arn
 github.com/aws/aws-sdk-go/aws/awserr
-github.com/aws/aws-sdk-go/service/dynamodb
-github.com/aws/aws-sdk-go/service/s3
-github.com/aws/aws-sdk-go/aws/credentials
-github.com/aws/aws-sdk-go/aws/endpoints
-github.com/aws/aws-sdk-go/internal/sdkio
 github.com/aws/aws-sdk-go/aws/awsutil
 github.com/aws/aws-sdk-go/aws/client
 github.com/aws/aws-sdk-go/aws/client/metadata
+github.com/aws/aws-sdk-go/aws/corehandlers
+github.com/aws/aws-sdk-go/aws/credentials
+github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds
+github.com/aws/aws-sdk-go/aws/credentials/endpointcreds
+github.com/aws/aws-sdk-go/aws/credentials/processcreds
+github.com/aws/aws-sdk-go/aws/credentials/stscreds
 github.com/aws/aws-sdk-go/aws/crr
+github.com/aws/aws-sdk-go/aws/csm
+github.com/aws/aws-sdk-go/aws/defaults
+github.com/aws/aws-sdk-go/aws/ec2metadata
+github.com/aws/aws-sdk-go/aws/endpoints
 github.com/aws/aws-sdk-go/aws/request
+github.com/aws/aws-sdk-go/aws/session
 github.com/aws/aws-sdk-go/aws/signer/v4
-github.com/aws/aws-sdk-go/private/protocol
+github.com/aws/aws-sdk-go/internal/ini
-github.com/aws/aws-sdk-go/private/protocol/jsonrpc
 github.com/aws/aws-sdk-go/internal/s3err
+github.com/aws/aws-sdk-go/internal/sdkio
+github.com/aws/aws-sdk-go/internal/sdkmath
+github.com/aws/aws-sdk-go/internal/sdkrand
+github.com/aws/aws-sdk-go/internal/sdkuri
+github.com/aws/aws-sdk-go/internal/shareddefaults
+github.com/aws/aws-sdk-go/private/protocol
 github.com/aws/aws-sdk-go/private/protocol/eventstream
 github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi
+github.com/aws/aws-sdk-go/private/protocol/json/jsonutil
+github.com/aws/aws-sdk-go/private/protocol/jsonrpc
+github.com/aws/aws-sdk-go/private/protocol/query
+github.com/aws/aws-sdk-go/private/protocol/query/queryutil
 github.com/aws/aws-sdk-go/private/protocol/rest
 github.com/aws/aws-sdk-go/private/protocol/restxml
 github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
-github.com/aws/aws-sdk-go/aws/arn
+github.com/aws/aws-sdk-go/service/dynamodb
-github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds
-github.com/aws/aws-sdk-go/aws/credentials/stscreds
-github.com/aws/aws-sdk-go/aws/defaults
-github.com/aws/aws-sdk-go/aws/ec2metadata
-github.com/aws/aws-sdk-go/aws/session
 github.com/aws/aws-sdk-go/service/iam
+github.com/aws/aws-sdk-go/service/s3
 github.com/aws/aws-sdk-go/service/sts
-github.com/aws/aws-sdk-go/internal/ini
-github.com/aws/aws-sdk-go/internal/shareddefaults
-github.com/aws/aws-sdk-go/internal/sdkrand
-github.com/aws/aws-sdk-go/private/protocol/json/jsonutil
-github.com/aws/aws-sdk-go/private/protocol/query
-github.com/aws/aws-sdk-go/internal/sdkuri
 github.com/aws/aws-sdk-go/service/sts/stsiface
-github.com/aws/aws-sdk-go/aws/corehandlers
-github.com/aws/aws-sdk-go/aws/credentials/endpointcreds
-github.com/aws/aws-sdk-go/aws/credentials/processcreds
-github.com/aws/aws-sdk-go/aws/csm
-github.com/aws/aws-sdk-go/private/protocol/query/queryutil
 # github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d
 github.com/bgentry/go-netrc/netrc
 # github.com/bgentry/speakeasy v0.1.0
@@ -133,19 +134,19 @@ github.com/bmatcuk/doublestar
 # github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e
 github.com/chzyer/readline
 # github.com/coreos/etcd v3.3.10+incompatible
+github.com/coreos/etcd/auth/authpb
 github.com/coreos/etcd/client
 github.com/coreos/etcd/clientv3
 github.com/coreos/etcd/clientv3/concurrency
-github.com/coreos/etcd/pkg/transport
-github.com/coreos/etcd/pkg/pathutil
-github.com/coreos/etcd/pkg/srv
-github.com/coreos/etcd/pkg/types
-github.com/coreos/etcd/version
-github.com/coreos/etcd/auth/authpb
 github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes
 github.com/coreos/etcd/etcdserver/etcdserverpb
 github.com/coreos/etcd/mvcc/mvccpb
+github.com/coreos/etcd/pkg/pathutil
+github.com/coreos/etcd/pkg/srv
 github.com/coreos/etcd/pkg/tlsutil
+github.com/coreos/etcd/pkg/transport
+github.com/coreos/etcd/pkg/types
+github.com/coreos/etcd/version
 # github.com/coreos/go-semver v0.2.0
 github.com/coreos/go-semver/semver
 # github.com/davecgh/go-spew v1.1.1
@@ -170,11 +171,11 @@ github.com/gogo/protobuf/protoc-gen-gogo/descriptor
 github.com/golang/mock/gomock
 # github.com/golang/protobuf v1.3.2
 github.com/golang/protobuf/proto
+github.com/golang/protobuf/protoc-gen-go/descriptor
 github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/timestamp
-github.com/golang/protobuf/protoc-gen-go/descriptor
 # github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db
 github.com/golang/snappy
 # github.com/google/go-cmp v0.3.0
@@ -192,14 +193,8 @@ github.com/google/uuid
 github.com/googleapis/gax-go/v2
 # github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968
 github.com/gophercloud/gophercloud
+github.com/gophercloud/gophercloud/internal
 github.com/gophercloud/gophercloud/openstack
-github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers
-github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects
-github.com/gophercloud/gophercloud/pagination
-github.com/gophercloud/gophercloud/openstack/identity/v2/tokens
-github.com/gophercloud/gophercloud/openstack/identity/v3/tokens
-github.com/gophercloud/gophercloud/openstack/utils
-github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts
 github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions
 github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes
 github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots
@@ -224,15 +219,19 @@ github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters
 github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates
 github.com/gophercloud/gophercloud/openstack/db/v1/configurations
 github.com/gophercloud/gophercloud/openstack/db/v1/databases
+github.com/gophercloud/gophercloud/openstack/db/v1/datastores
 github.com/gophercloud/gophercloud/openstack/db/v1/instances
 github.com/gophercloud/gophercloud/openstack/db/v1/users
 github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets
 github.com/gophercloud/gophercloud/openstack/dns/v2/zones
+github.com/gophercloud/gophercloud/openstack/identity/v2/tenants
+github.com/gophercloud/gophercloud/openstack/identity/v2/tokens
 github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints
 github.com/gophercloud/gophercloud/openstack/identity/v3/groups
 github.com/gophercloud/gophercloud/openstack/identity/v3/projects
 github.com/gophercloud/gophercloud/openstack/identity/v3/roles
 github.com/gophercloud/gophercloud/openstack/identity/v3/services
+github.com/gophercloud/gophercloud/openstack/identity/v3/tokens
 github.com/gophercloud/gophercloud/openstack/identity/v3/users
 github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata
 github.com/gophercloud/gophercloud/openstack/imageservice/v2/images
@@ -269,6 +268,9 @@ github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/sit
 github.com/gophercloud/gophercloud/openstack/networking/v2/networks
 github.com/gophercloud/gophercloud/openstack/networking/v2/ports
 github.com/gophercloud/gophercloud/openstack/networking/v2/subnets
+github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts
+github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers
+github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects
 github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth
 github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/errors
 github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/messages
@@ -276,17 +278,16 @@ github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservic
 github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks
 github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares
 github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots
-github.com/gophercloud/gophercloud/openstack/identity/v2/tenants
-github.com/gophercloud/gophercloud/openstack/db/v1/datastores
-github.com/gophercloud/gophercloud/internal
+github.com/gophercloud/gophercloud/openstack/utils
+github.com/gophercloud/gophercloud/pagination
 # github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01
 github.com/gophercloud/utils/openstack/clientconfig
-# github.com/hashicorp/aws-sdk-go-base v0.3.0
+# github.com/hashicorp/aws-sdk-go-base v0.4.0
 github.com/hashicorp/aws-sdk-go-base
 # github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089
 github.com/hashicorp/consul/api
-github.com/hashicorp/consul/testutil
 github.com/hashicorp/consul/lib/freeport
+github.com/hashicorp/consul/testutil
 github.com/hashicorp/consul/testutil/retry
 # github.com/hashicorp/errwrap v1.0.0
 github.com/hashicorp/errwrap
@@ -328,31 +329,31 @@ github.com/hashicorp/hcl
 github.com/hashicorp/hcl/hcl/ast
 github.com/hashicorp/hcl/hcl/parser
 github.com/hashicorp/hcl/hcl/printer
-github.com/hashicorp/hcl/hcl/token
-github.com/hashicorp/hcl/json/parser
 github.com/hashicorp/hcl/hcl/scanner
 github.com/hashicorp/hcl/hcl/strconv
+github.com/hashicorp/hcl/hcl/token
+github.com/hashicorp/hcl/json/parser
 github.com/hashicorp/hcl/json/scanner
 github.com/hashicorp/hcl/json/token
 # github.com/hashicorp/hcl/v2 v2.0.0
 github.com/hashicorp/hcl/v2
-github.com/hashicorp/hcl/v2/hclsyntax
+github.com/hashicorp/hcl/v2/ext/dynblock
+github.com/hashicorp/hcl/v2/ext/typeexpr
+github.com/hashicorp/hcl/v2/gohcl
 github.com/hashicorp/hcl/v2/hcldec
-github.com/hashicorp/hcl/v2/hclwrite
-github.com/hashicorp/hcl/v2/json
 github.com/hashicorp/hcl/v2/hcled
 github.com/hashicorp/hcl/v2/hclparse
-github.com/hashicorp/hcl/v2/gohcl
+github.com/hashicorp/hcl/v2/hclsyntax
-github.com/hashicorp/hcl/v2/ext/typeexpr
-github.com/hashicorp/hcl/v2/ext/dynblock
 github.com/hashicorp/hcl/v2/hcltest
+github.com/hashicorp/hcl/v2/hclwrite
+github.com/hashicorp/hcl/v2/json
 # github.com/hashicorp/hcl2 v0.0.0-20190821123243-0c888d1241f6
 github.com/hashicorp/hcl2/gohcl
 github.com/hashicorp/hcl2/hcl
 github.com/hashicorp/hcl2/hcl/hclsyntax
+github.com/hashicorp/hcl2/hcl/json
 github.com/hashicorp/hcl2/hclparse
 github.com/hashicorp/hcl2/hclwrite
-github.com/hashicorp/hcl2/hcl/json
 # github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590
 github.com/hashicorp/hil
 github.com/hashicorp/hil/ast
@@ -365,9 +366,9 @@ github.com/hashicorp/serf/coordinate
 # github.com/hashicorp/terraform-config-inspect v0.0.0-20190821133035-82a99dc22ef4
 github.com/hashicorp/terraform-config-inspect/tfconfig
 # github.com/hashicorp/vault v0.10.4
-github.com/hashicorp/vault/helper/pgpkeys
-github.com/hashicorp/vault/helper/jsonutil
 github.com/hashicorp/vault/helper/compressutil
+github.com/hashicorp/vault/helper/jsonutil
+github.com/hashicorp/vault/helper/pgpkeys
 # github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb
 github.com/hashicorp/yamux
 # github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
@@ -375,23 +376,23 @@ github.com/jmespath/go-jmespath
 # github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926
 github.com/joyent/triton-go
 github.com/joyent/triton-go/authentication
+github.com/joyent/triton-go/client
 github.com/joyent/triton-go/errors
 github.com/joyent/triton-go/storage
-github.com/joyent/triton-go/client
 # github.com/json-iterator/go v1.1.5
 github.com/json-iterator/go
 # github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
 github.com/kardianos/osext
 # github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba
-github.com/keybase/go-crypto/openpgp
-github.com/keybase/go-crypto/openpgp/packet
-github.com/keybase/go-crypto/openpgp/armor
-github.com/keybase/go-crypto/openpgp/errors
-github.com/keybase/go-crypto/openpgp/s2k
-github.com/keybase/go-crypto/rsa
 github.com/keybase/go-crypto/brainpool
 github.com/keybase/go-crypto/cast5
+github.com/keybase/go-crypto/openpgp
+github.com/keybase/go-crypto/openpgp/armor
 github.com/keybase/go-crypto/openpgp/elgamal
+github.com/keybase/go-crypto/openpgp/errors
+github.com/keybase/go-crypto/openpgp/packet
+github.com/keybase/go-crypto/openpgp/s2k
+github.com/keybase/go-crypto/rsa
 # github.com/lib/pq v1.0.0
 github.com/lib/pq
 github.com/lib/pq/oid
@@ -450,8 +451,8 @@ github.com/pkg/browser
 github.com/pkg/errors
 # github.com/posener/complete v1.2.1
 github.com/posener/complete
-github.com/posener/complete/cmd/install
 github.com/posener/complete/cmd
+github.com/posener/complete/cmd/install
 github.com/posener/complete/match
 # github.com/satori/go.uuid v1.2.0
 github.com/satori/go.uuid
@@ -466,9 +467,9 @@ github.com/terraform-providers/terraform-provider-openstack/openstack
 github.com/ugorji/go/codec
 # github.com/ulikunitz/xz v0.5.5
 github.com/ulikunitz/xz
+github.com/ulikunitz/xz/internal/hash
 github.com/ulikunitz/xz/internal/xlog
 github.com/ulikunitz/xz/lzma
-github.com/ulikunitz/xz/internal/hash
 # github.com/vmihailenco/msgpack v4.0.1+incompatible
 github.com/vmihailenco/msgpack
 github.com/vmihailenco/msgpack/codes
@@ -478,87 +479,82 @@ github.com/xanzy/ssh-agent
 github.com/xlab/treeprint
 # github.com/zclconf/go-cty v1.1.0
 github.com/zclconf/go-cty/cty
-github.com/zclconf/go-cty/cty/gocty
 github.com/zclconf/go-cty/cty/convert
-github.com/zclconf/go-cty/cty/json
 github.com/zclconf/go-cty/cty/function
 github.com/zclconf/go-cty/cty/function/stdlib
+github.com/zclconf/go-cty/cty/gocty
+github.com/zclconf/go-cty/cty/json
 github.com/zclconf/go-cty/cty/msgpack
 github.com/zclconf/go-cty/cty/set
 # github.com/zclconf/go-cty-yaml v1.0.1
 github.com/zclconf/go-cty-yaml
 # go.opencensus.io v0.22.0
-go.opencensus.io/trace
-go.opencensus.io/plugin/ochttp
+go.opencensus.io
 go.opencensus.io/internal
-go.opencensus.io/trace/internal
-go.opencensus.io/trace/tracestate
+go.opencensus.io/internal/tagencoding
+go.opencensus.io/metric/metricdata
+go.opencensus.io/metric/metricproducer
+go.opencensus.io/plugin/ochttp
 go.opencensus.io/plugin/ochttp/propagation/b3
+go.opencensus.io/resource
 go.opencensus.io/stats
+go.opencensus.io/stats/internal
 go.opencensus.io/stats/view
 go.opencensus.io/tag
+go.opencensus.io/trace
+go.opencensus.io/trace/internal
 go.opencensus.io/trace/propagation
-go.opencensus.io
-go.opencensus.io/metric/metricdata
-go.opencensus.io/stats/internal
-go.opencensus.io/internal/tagencoding
-go.opencensus.io/metric/metricproducer
-go.opencensus.io/resource
+go.opencensus.io/trace/tracestate
 # golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
-golang.org/x/crypto/ssh
-golang.org/x/crypto/ssh/agent
-golang.org/x/crypto/ssh/knownhosts
 golang.org/x/crypto/bcrypt
-golang.org/x/crypto/openpgp
-golang.org/x/crypto/pkcs12
+golang.org/x/crypto/blowfish
+golang.org/x/crypto/cast5
 golang.org/x/crypto/curve25519
 golang.org/x/crypto/ed25519
+golang.org/x/crypto/ed25519/internal/edwards25519
 golang.org/x/crypto/internal/chacha20
-golang.org/x/crypto/poly1305
-golang.org/x/crypto/blowfish
+golang.org/x/crypto/internal/subtle
+golang.org/x/crypto/md4
+golang.org/x/crypto/openpgp
 golang.org/x/crypto/openpgp/armor
+golang.org/x/crypto/openpgp/elgamal
 golang.org/x/crypto/openpgp/errors
 golang.org/x/crypto/openpgp/packet
 golang.org/x/crypto/openpgp/s2k
+golang.org/x/crypto/pkcs12
 golang.org/x/crypto/pkcs12/internal/rc2
-golang.org/x/crypto/ed25519/internal/edwards25519
-golang.org/x/crypto/internal/subtle
-golang.org/x/crypto/md4
-golang.org/x/crypto/cast5
-golang.org/x/crypto/openpgp/elgamal
+golang.org/x/crypto/poly1305
+golang.org/x/crypto/ssh
+golang.org/x/crypto/ssh/agent
+golang.org/x/crypto/ssh/knownhosts
 # golang.org/x/net v0.0.0-20190620200207-3b0461eec859
 golang.org/x/net/context
-golang.org/x/net/idna
-golang.org/x/net/trace
 golang.org/x/net/context/ctxhttp
+golang.org/x/net/html
+golang.org/x/net/html/atom
 golang.org/x/net/html/charset
-golang.org/x/net/internal/timeseries
+golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
-golang.org/x/net/html
-golang.org/x/net/http/httpguts
-golang.org/x/net/html/atom
+golang.org/x/net/idna
+golang.org/x/net/internal/timeseries
+golang.org/x/net/trace
 # golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
 golang.org/x/oauth2
-golang.org/x/oauth2/jwt
+golang.org/x/oauth2/google
 golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
-golang.org/x/oauth2/google
+golang.org/x/oauth2/jwt
 # golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa
-golang.org/x/sys/windows
-golang.org/x/sys/unix
 golang.org/x/sys/cpu
+golang.org/x/sys/unix
+golang.org/x/sys/windows
 # golang.org/x/text v0.3.2
-golang.org/x/text/unicode/norm
-golang.org/x/text/transform
-golang.org/x/text/secure/bidirule
-golang.org/x/text/unicode/bidi
 golang.org/x/text/encoding
 golang.org/x/text/encoding/charmap
 golang.org/x/text/encoding/htmlindex
-golang.org/x/text/language
-golang.org/x/text/encoding/internal/identifier
 golang.org/x/text/encoding/internal
+golang.org/x/text/encoding/internal/identifier
 golang.org/x/text/encoding/japanese
 golang.org/x/text/encoding/korean
 golang.org/x/text/encoding/simplifiedchinese
@@ -566,58 +562,62 @@ golang.org/x/text/encoding/traditionalchinese
 golang.org/x/text/encoding/unicode
 golang.org/x/text/internal/language
 golang.org/x/text/internal/language/compact
-golang.org/x/text/internal/utf8internal
-golang.org/x/text/runes
 golang.org/x/text/internal/tag
+golang.org/x/text/internal/utf8internal
+golang.org/x/text/language
+golang.org/x/text/runes
+golang.org/x/text/secure/bidirule
+golang.org/x/text/transform
+golang.org/x/text/unicode/bidi
+golang.org/x/text/unicode/norm
 # golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
 golang.org/x/time/rate
 # google.golang.org/api v0.9.0
+google.golang.org/api/gensupport
+google.golang.org/api/googleapi
+google.golang.org/api/googleapi/internal/uritemplates
+google.golang.org/api/googleapi/transport
+google.golang.org/api/internal
 google.golang.org/api/iterator
 google.golang.org/api/option
-google.golang.org/api/googleapi
 google.golang.org/api/storage/v1
 google.golang.org/api/transport/http
-google.golang.org/api/internal
-google.golang.org/api/googleapi/internal/uritemplates
-google.golang.org/api/gensupport
-google.golang.org/api/googleapi/transport
 google.golang.org/api/transport/http/internal/propagation
 # google.golang.org/appengine v1.6.1
-google.golang.org/appengine/urlfetch
 google.golang.org/appengine
 google.golang.org/appengine/datastore
-google.golang.org/appengine/internal
-google.golang.org/appengine/internal/urlfetch
-google.golang.org/appengine/internal/app_identity
-google.golang.org/appengine/internal/modules
 google.golang.org/appengine/datastore/internal/cloudkey
-google.golang.org/appengine/internal/datastore
-google.golang.org/appengine/internal/base
-google.golang.org/appengine/internal/log
-google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/datastore/internal/cloudpb
+google.golang.org/appengine/internal
+google.golang.org/appengine/internal/app_identity
+google.golang.org/appengine/internal/base
+google.golang.org/appengine/internal/datastore
+google.golang.org/appengine/internal/log
+google.golang.org/appengine/internal/modules
+google.golang.org/appengine/internal/remote_api
+google.golang.org/appengine/internal/urlfetch
+google.golang.org/appengine/urlfetch
 # google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
-google.golang.org/genproto/googleapis/iam/v1
-google.golang.org/genproto/googleapis/rpc/status
-google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/api/annotations
+google.golang.org/genproto/googleapis/iam/v1
+google.golang.org/genproto/googleapis/rpc/code
+google.golang.org/genproto/googleapis/rpc/status
 google.golang.org/genproto/googleapis/type/expr
 # google.golang.org/grpc v1.21.1
 google.golang.org/grpc
-google.golang.org/grpc/test/bufconn
-google.golang.org/grpc/codes
-google.golang.org/grpc/status
-google.golang.org/grpc/metadata
-google.golang.org/grpc/credentials
-google.golang.org/grpc/health
-google.golang.org/grpc/health/grpc_health_v1
-google.golang.org/grpc/grpclog
-google.golang.org/grpc/keepalive
 google.golang.org/grpc/balancer
+google.golang.org/grpc/balancer/base
 google.golang.org/grpc/balancer/roundrobin
+google.golang.org/grpc/binarylog/grpc_binarylog_v1
+google.golang.org/grpc/codes
 google.golang.org/grpc/connectivity
+google.golang.org/grpc/credentials
+google.golang.org/grpc/credentials/internal
 google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/grpclog
+google.golang.org/grpc/health
+google.golang.org/grpc/health/grpc_health_v1
 google.golang.org/grpc/internal
 google.golang.org/grpc/internal/backoff
 google.golang.org/grpc/internal/balancerload
@@ -626,18 +626,19 @@ google.golang.org/grpc/internal/channelz
 google.golang.org/grpc/internal/envconfig
 google.golang.org/grpc/internal/grpcrand
 google.golang.org/grpc/internal/grpcsync
+google.golang.org/grpc/internal/syscall
 google.golang.org/grpc/internal/transport
+google.golang.org/grpc/keepalive
+google.golang.org/grpc/metadata
 google.golang.org/grpc/naming
 google.golang.org/grpc/peer
 google.golang.org/grpc/resolver
 google.golang.org/grpc/resolver/dns
 google.golang.org/grpc/resolver/passthrough
 google.golang.org/grpc/stats
+google.golang.org/grpc/status
 google.golang.org/grpc/tap
-google.golang.org/grpc/credentials/internal
+google.golang.org/grpc/test/bufconn
-google.golang.org/grpc/balancer/base
-google.golang.org/grpc/binarylog/grpc_binarylog_v1
-google.golang.org/grpc/internal/syscall
 # gopkg.in/ini.v1 v1.42.0
 gopkg.in/ini.v1
 # gopkg.in/yaml.v2 v2.2.2
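vendor/modules.txt is generated by the go tool rather than edited by hand, which is why so much of the churn above is pure re-sorting of package lists rather than real dependency changes. A plausible way to reproduce this kind of bump locally, using standard Go module commands (not taken from the commit itself):

go get github.com/hashicorp/aws-sdk-go-base@v0.4.0
go mod tidy
go mod vendor

go mod vendor rewrites the vendor/ tree and regenerates vendor/modules.txt from go.mod, so the aws-sdk-go and aws-sdk-go-base version lines change alongside the mechanically reordered package entries.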