Add TableStore config to support state locking

This commit is contained in:
He Guimin 2019-04-11 07:11:10 +08:00
parent b887d44712
commit 3f44dd9dec
134 changed files with 18523 additions and 10361 deletions

View File

@ -4,6 +4,15 @@ package addrs
import "strconv"
// _ is a compile-time guard emitted by the stringer tool: if any ResourceMode
// constant value changes, an index below becomes invalid and compilation
// fails, signalling that the generated String() tables must be regenerated.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[InvalidResourceMode-0]
	_ = x[ManagedResourceMode-77] // 77 == 'M' — presumably rune-valued constants; confirm in the addrs package
	_ = x[DataResourceMode-68]    // 68 == 'D'
}
const (
_ResourceMode_name_0 = "InvalidResourceMode"
_ResourceMode_name_1 = "DataResourceMode"

View File

@ -4,6 +4,15 @@ package local
import "strconv"
// _ is a compile-time guard emitted by the stringer tool for countHookAction:
// if any constant value changes, an index below goes out of range and the
// build fails until stringer is re-run.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[countHookActionAdd-0]
	_ = x[countHookActionChange-1]
	_ = x[countHookActionRemove-2]
}

// Generated lookup tables: _countHookAction_name concatenates all constant
// names, and _countHookAction_index holds the byte offsets delimiting each.
const _countHookAction_name = "countHookActionAddcountHookActionChangecountHookActionRemove"

var _countHookAction_index = [...]uint8{0, 18, 39, 60}

View File

@ -4,6 +4,16 @@ package backend
import "strconv"
// _ is a compile-time guard emitted by the stringer tool for OperationType:
// if any constant value changes, an index below goes out of range and the
// build fails until stringer is re-run.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[OperationTypeInvalid-0]
	_ = x[OperationTypeRefresh-1]
	_ = x[OperationTypePlan-2]
	_ = x[OperationTypeApply-3]
}

// Generated lookup tables: _OperationType_name concatenates all constant
// names, and _OperationType_index holds the byte offsets delimiting each.
const _OperationType_name = "OperationTypeInvalidOperationTypeRefreshOperationTypePlanOperationTypeApply"

var _OperationType_index = [...]uint8{0, 20, 40, 57, 75}

View File

@ -11,9 +11,8 @@ import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/resource"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
"github.com/aliyun/alibaba-cloud-sdk-go/services/location"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/terraform/version"
"log"
@ -44,7 +43,7 @@ func New() backend.Backend {
Type: schema.TypeString,
Optional: true,
Description: "Alibaba Cloud Security Token",
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECURITY_TOKEN", os.Getenv("SECURITY_TOKEN")),
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECURITY_TOKEN", ""),
},
"region": &schema.Schema{
@ -53,7 +52,12 @@ func New() backend.Backend {
Description: "The region of the OSS bucket.",
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_REGION", os.Getenv("ALICLOUD_DEFAULT_REGION")),
},
"tablestore_endpoint": {
Type: schema.TypeString,
Optional: true,
Description: "A custom endpoint for the TableStore API",
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_TABLESTORE_ENDPOINT", ""),
},
"endpoint": {
Type: schema.TypeString,
Optional: true,
@ -67,30 +71,38 @@ func New() backend.Backend {
Description: "The name of the OSS bucket",
},
"path": &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: "The path relative to your object storage directory where the state file will be stored.",
},
"name": &schema.Schema{
"prefix": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "The name of the state file inside the bucket",
Description: "The directory where state files will be saved inside the bucket",
Default: "env:",
ValidateFunc: func(v interface{}, s string) ([]string, []error) {
prefix := v.(string)
if strings.HasPrefix(prefix, "/") || strings.HasPrefix(prefix, "./") {
return nil, []error{fmt.Errorf("workspace_key_prefix must not start with '/' or './'")}
}
return nil, nil
},
},
"key": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "The path of the state file inside the bucket",
ValidateFunc: func(v interface{}, s string) ([]string, []error) {
if strings.HasPrefix(v.(string), "/") || strings.HasSuffix(v.(string), "/") {
return nil, []error{fmt.Errorf("name can not start and end with '/'")}
return nil, []error{fmt.Errorf("key can not start and end with '/'")}
}
return nil, nil
},
Default: "terraform.tfstate",
},
"lock": &schema.Schema{
Type: schema.TypeBool,
"tablestore_table": {
Type: schema.TypeString,
Optional: true,
Description: "Whether to lock state access. Defaults to true",
Default: true,
Description: "TableStore table for state locking and consistency",
Default: "",
},
"encrypt": &schema.Schema{
@ -130,14 +142,16 @@ type Backend struct {
// The fields below are set from configure
ossClient *oss.Client
otsClient *tablestore.TableStoreClient
bucketName string
statePath string
stateName string
statePrefix string
stateKey string
serverSideEncryption bool
acl string
endpoint string
lock bool
otsEndpoint string
otsTable string
}
func (b *Backend) configure(ctx context.Context) error {
@ -149,27 +163,20 @@ func (b *Backend) configure(ctx context.Context) error {
d := schema.FromContextBackendConfig(ctx)
b.bucketName = d.Get("bucket").(string)
dir := strings.Trim(d.Get("path").(string), "/")
if strings.HasPrefix(dir, "./") {
dir = strings.TrimPrefix(dir, "./")
}
b.statePath = dir
b.stateName = d.Get("name").(string)
b.statePrefix = strings.TrimPrefix(strings.Trim(d.Get("prefix").(string), "/"), "./")
b.stateKey = d.Get("key").(string)
b.serverSideEncryption = d.Get("encrypt").(bool)
b.acl = d.Get("acl").(string)
b.lock = d.Get("lock").(bool)
access_key := d.Get("access_key").(string)
secret_key := d.Get("secret_key").(string)
security_token := d.Get("security_token").(string)
accessKey := d.Get("access_key").(string)
secretKey := d.Get("secret_key").(string)
securityToken := d.Get("security_token").(string)
region := d.Get("region").(string)
endpoint := d.Get("endpoint").(string)
schma := "https"
if endpoint == "" {
endpointItem, _ := b.getOSSEndpointByRegion(access_key, secret_key, security_token, region)
endpointItem, _ := b.getOSSEndpointByRegion(accessKey, secretKey, securityToken, region)
if endpointItem != nil && len(endpointItem.Endpoint) > 0 {
if len(endpointItem.Protocols.Protocols) > 0 {
// HTTP or HTTPS
@ -191,13 +198,23 @@ func (b *Backend) configure(ctx context.Context) error {
}
log.Printf("[DEBUG] Instantiate OSS client using endpoint: %#v", endpoint)
var options []oss.ClientOption
if security_token != "" {
options = append(options, oss.SecurityToken(security_token))
if securityToken != "" {
options = append(options, oss.SecurityToken(securityToken))
}
options = append(options, oss.UserAgent(fmt.Sprintf("%s/%s", TerraformUA, TerraformVersion)))
client, err := oss.New(endpoint, access_key, secret_key, options...)
client, err := oss.New(endpoint, accessKey, secretKey, options...)
b.ossClient = client
otsEndpoint := d.Get("tablestore_endpoint").(string)
if otsEndpoint != "" {
if !strings.HasPrefix(otsEndpoint, "http") {
otsEndpoint = fmt.Sprintf("%s://%s", schma, otsEndpoint)
}
b.otsEndpoint = otsEndpoint
parts := strings.Split(strings.TrimPrefix(strings.TrimPrefix(otsEndpoint, "https://"), "http://"), ".")
b.otsClient = tablestore.NewClientWithConfig(otsEndpoint, parts[0], accessKey, secretKey, securityToken, tablestore.NewDefaultTableStoreConfig())
}
b.otsTable = d.Get("tablestore_table").(string)
return err
}
@ -222,11 +239,6 @@ func (b *Backend) getOSSEndpointByRegion(access_key, secret_key, security_token,
}
func getSdkConfig() *sdk.Config {
// Fix bug "open /usr/local/go/lib/time/zoneinfo.zip: no such file or directory" which happened in windows.
if data, ok := resource.GetTZData("GMT"); ok {
utils.TZData = data
utils.LoadLocationFromTZData = time.LoadLocationFromTZData
}
return sdk.NewConfig().
WithMaxRetryTime(5).
WithTimeout(time.Duration(30) * time.Second).

View File

@ -12,6 +12,7 @@ import (
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/states"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
"log"
"path"
)
@ -33,7 +34,32 @@ func (b *Backend) remoteClient(name string) (*RemoteClient, error) {
lockFile: b.lockFile(name),
serverSideEncryption: b.serverSideEncryption,
acl: b.acl,
doLock: b.lock,
otsTable: b.otsTable,
otsClient: b.otsClient,
}
if b.otsEndpoint != "" && b.otsTable != "" {
table, err := b.otsClient.DescribeTable(&tablestore.DescribeTableRequest{
TableName: b.otsTable,
})
if err != nil {
return client, fmt.Errorf("Error describing table store %s: %#v", b.otsTable, err)
}
for _, t := range table.TableMeta.SchemaEntry {
pkMeta := TableStorePrimaryKeyMeta{
PKName: *t.Name,
}
if *t.Type == tablestore.PrimaryKeyType_INTEGER {
pkMeta.PKType = "Integer"
} else if *t.Type == tablestore.PrimaryKeyType_STRING {
pkMeta.PKType = "String"
} else if *t.Type == tablestore.PrimaryKeyType_BINARY {
pkMeta.PKType = "Binary"
} else {
return client, fmt.Errorf("Unsupported PrimaryKey type: %d.", *t.Type)
}
client.otsTabkePK = pkMeta
break
}
}
return client, nil
@ -46,7 +72,7 @@ func (b *Backend) Workspaces() ([]string, error) {
}
var options []oss.Option
options = append(options, oss.Prefix(b.statePath))
options = append(options, oss.Prefix(b.statePrefix+"/"))
resp, err := bucket.ListObjects(options...)
if err != nil {
@ -54,9 +80,17 @@ func (b *Backend) Workspaces() ([]string, error) {
}
result := []string{backend.DefaultStateName}
prefix := b.statePrefix
for _, obj := range resp.Objects {
if b.keyEnv(obj.Key) != "" {
result = append(result, b.keyEnv(obj.Key))
// we have 3 parts: the state prefix, the workspace name, and the state file: <prefix>/<workspace-name>/<key>
if path.Join(b.statePrefix, b.stateKey) == obj.Key {
// filter the default workspace
continue
}
parts := strings.Split(strings.TrimPrefix(obj.Key, prefix+"/"), "/")
if len(parts) > 0 && parts[0] != "" {
result = append(result, parts[0])
}
}
@ -83,16 +117,13 @@ func (b *Backend) StateMgr(name string) (state.State, error) {
}
stateMgr := &remote.State{Client: client}
if !b.lock {
stateMgr.DisableLocks()
}
// Check to see if this state already exists.
existing, err := b.Workspaces()
if err != nil {
return nil, err
}
log.Printf("[DEBUG] Current state name: %s. All States:%#v", name, existing)
log.Printf("[DEBUG] Current workspace name: %s. All workspaces:%#v", name, existing)
exists := false
for _, s := range existing {
@ -146,41 +177,15 @@ func (b *Backend) StateMgr(name string) (state.State, error) {
return stateMgr, nil
}
// keyEnv extracts the workspace (environment) name from an OSS object key of
// the form "<statePath>/<workspace>/<stateName>". It returns "" when the key
// does not match that three-part layout, does not live under this backend's
// state path, or does not end in this backend's state file name.
func (b *Backend) keyEnv(key string) string {
	// we have 3 parts, the state path, the state name, and the state file
	parts := strings.Split(key, "/")
	length := len(parts)
	if length < 3 {
		// use default state
		return ""
	}

	// shouldn't happen since we listed by prefix
	if strings.Join(parts[0:length-2], "/") != b.statePath {
		return ""
	}

	// not our key, so don't include it in our listing
	if parts[length-1] != b.stateName {
		return ""
	}

	return parts[length-2]
}
func (b *Backend) stateFile(name string) string {
if name == backend.DefaultStateName {
return path.Join(b.statePath, b.stateName)
return path.Join(b.statePrefix, b.stateKey)
}
return path.Join(b.statePath, name, b.stateName)
return path.Join(b.statePrefix, name, b.stateKey)
}
func (b *Backend) lockFile(name string) string {
if name == backend.DefaultStateName {
return path.Join(b.statePath, b.stateName+lockFileSuffix)
}
return path.Join(b.statePath, name, b.stateName+lockFileSuffix)
return b.stateFile(name) + lockFileSuffix
}
const stateUnlockError = `

View File

@ -7,6 +7,7 @@ import (
"time"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/config/hcl2shim"
"strings"
@ -33,22 +34,24 @@ func TestBackendConfig(t *testing.T) {
config := map[string]interface{}{
"region": "cn-beijing",
"bucket": "terraform-backend-oss-test",
"path": "mystate",
"name": "first.tfstate",
"prefix": "mystate",
"key": "first.tfstate",
"tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com",
"tablestore_table": "TableStore",
}
b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend)
if !strings.HasPrefix(b.ossClient.Config.Endpoint, "http://oss-cn-beijing") {
if !strings.HasPrefix(b.ossClient.Config.Endpoint, "https://oss-cn-beijing") {
t.Fatalf("Incorrect region was provided")
}
if b.bucketName != "terraform-backend-oss-test" {
t.Fatalf("Incorrect bucketName was provided")
}
if b.statePath != "mystate" {
if b.statePrefix != "mystate" {
t.Fatalf("Incorrect state file path was provided")
}
if b.stateName != "first.tfstate" {
if b.stateKey != "first.tfstate" {
t.Fatalf("Incorrect keyName was provided")
}
@ -65,8 +68,10 @@ func TestBackendConfig_invalidKey(t *testing.T) {
cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{
"region": "cn-beijing",
"bucket": "terraform-backend-oss-test",
"path": "/leading-slash",
"prefix": "/leading-slash",
"name": "/test.tfstate",
"tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com",
"tablestore_table": "TableStore",
})
_, results := New().PrepareConfig(cfg)
@ -79,16 +84,16 @@ func TestBackend(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("terraform-remote-oss-test-%x", time.Now().Unix())
statePath := "multi/level/path/"
statePrefix := "multi/level/path/"
b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
"bucket": bucketName,
"path": statePath,
"prefix": statePrefix,
})).(*Backend)
b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
"bucket": bucketName,
"path": statePath,
"prefix": statePrefix,
})).(*Backend)
createOSSBucket(t, b1.ossClient, bucketName)
@ -132,3 +137,35 @@ func deleteOSSBucket(t *testing.T, ossClient *oss.Client, bucketName string) {
t.Logf(warning, err)
}
}
// createTablestoreTable creates the TableStore table used for state locking
// in acceptance tests, with a single string primary key named "testbackend".
// NOTE: unlike the comment it was copied from (the DynamoDB-based S3 backend
// tests), this does not poll for the table to become queryable.
func createTablestoreTable(t *testing.T, otsClient *tablestore.TableStoreClient, tableName string) {
	tableMeta := new(tablestore.TableMeta)
	tableMeta.TableName = tableName
	tableMeta.AddPrimaryKeyColumn("testbackend", tablestore.PrimaryKeyType_STRING)

	tableOption := new(tablestore.TableOption)
	tableOption.TimeToAlive = -1 // -1: rows never expire (TableStore convention)
	tableOption.MaxVersion = 1   // keep a single version per cell

	reservedThroughput := new(tablestore.ReservedThroughput)

	_, err := otsClient.CreateTable(&tablestore.CreateTableRequest{
		TableMeta:          tableMeta,
		TableOption:        tableOption,
		ReservedThroughput: reservedThroughput,
	})
	if err != nil {
		t.Fatal(err)
	}
}
// deleteTablestoreTable removes the TableStore table created for a test run.
// Cleanup failures are logged rather than failing the test, since the table
// may simply need manual removal.
func deleteTablestoreTable(t *testing.T, otsClient *tablestore.TableStoreClient, tableName string) {
	req := &tablestore.DeleteTableRequest{TableName: tableName}
	if _, err := otsClient.DeleteTable(req); err != nil {
		t.Logf("WARNING: Failed to delete the test TableStore table %q. It has been left in your Alibaba Cloud account and may incur charges. (error was %s)", tableName, err)
	}
}

View File

@ -7,132 +7,106 @@ import (
"fmt"
"io"
"encoding/hex"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
"github.com/hashicorp/go-multierror"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
"github.com/pkg/errors"
"log"
"sync"
"time"
)
// Keys and timing knobs for the lock/consistency rows stored in TableStore.
const (
	// stateIDSuffix is appended to the lock path to form the "LockID" value
	// under which the state file's MD5 digest is recorded.
	stateIDSuffix = "-md5"
	// statePKValue is the fixed primary-key value written for lock and
	// digest rows (see getPKValue for type conversion).
	statePKValue = "terraform-remote-state-lock"
)

var (
	// The amount of time we will retry a state waiting for it to match the
	// expected checksum.
	consistencyRetryTimeout = 10 * time.Second

	// delay when polling the state
	consistencyRetryPollInterval = 2 * time.Second
)

// test hook called when checksums don't match
var testChecksumHook func()
// TableStorePrimaryKeyMeta describes the primary-key column of the TableStore
// lock table — its name and type ("Integer", "String" or "Binary") — as
// discovered from DescribeTable when the backend is configured.
type TableStorePrimaryKeyMeta struct {
	PKName string
	PKType string
}
// RemoteClient reads and writes a single remote state object in an OSS bucket
// and, when a TableStore table is configured, maintains the lock and
// MD5-digest rows used for locking and consistency checking.
type RemoteClient struct {
	ossClient            *oss.Client
	otsClient            *tablestore.TableStoreClient // set only when a tablestore_endpoint is configured
	bucketName           string
	stateFile            string // object key of the state file in the bucket
	lockFile             string // object key used for the OSS-based lock file
	serverSideEncryption bool
	acl                  string
	doLock               bool
	info                 *state.LockInfo
	mu                   sync.Mutex
	otsTable             string // TableStore table name; empty disables table-based locking
	// NOTE(review): "otsTabkePK" looks like a typo for "otsTablePK"; left
	// unchanged because other code references this field name.
	otsTabkePK TableStorePrimaryKeyMeta
}
func (c *RemoteClient) Get() (payload *remote.Payload, err error) {
c.mu.Lock()
defer c.mu.Unlock()
deadline := time.Now().Add(consistencyRetryTimeout)
buf, err := c.getObj(c.stateFile)
// If we have a checksum, and the returned payload doesn't match, we retry
// up until deadline.
for {
payload, err = c.getObj()
if err != nil {
return nil, err
}
// If there was no data, then return nil
if buf == nil || len(buf.Bytes()) == 0 {
log.Printf("[DEBUG] State %s has no data.", c.stateFile)
return nil, nil
// If the remote state was manually removed the payload will be nil,
// but if there's still a digest entry for that state we will still try
// to compare the MD5 below.
var digest []byte
if payload != nil {
digest = payload.MD5
}
md5 := md5.Sum(buf.Bytes())
payload = &remote.Payload{
Data: buf.Bytes(),
MD5: md5[:],
// verify that this state is what we expect
if expected, err := c.getMD5(); err != nil {
log.Printf("[WARN] failed to fetch state md5: %s", err)
} else if len(expected) > 0 && !bytes.Equal(expected, digest) {
log.Printf("[WARN] state md5 mismatch: expected '%x', got '%x'", expected, digest)
if testChecksumHook != nil {
testChecksumHook()
}
if time.Now().Before(deadline) {
time.Sleep(consistencyRetryPollInterval)
log.Println("[INFO] retrying OSS RemoteClient.Get...")
continue
}
return nil, fmt.Errorf(errBadChecksumFmt, digest)
}
break
}
return payload, nil
}
// Put uploads the state data to the backing OSS object.
// The client-level mutex serializes concurrent operations.
func (c *RemoteClient) Put(data []byte) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	return c.putObj(c.stateFile, data)
}
// Delete removes the state object from the OSS bucket.
// The client-level mutex serializes concurrent operations.
func (c *RemoteClient) Delete() error {
	c.mu.Lock()
	defer c.mu.Unlock()

	return c.deleteObj(c.stateFile)
}
// Lock acquires the state lock by writing a lock-info object to the OSS
// bucket, returning the lock ID on success. When locking is disabled
// (doLock false) it is a no-op returning an empty ID.
func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.doLock {
		return "", nil
	}

	bucket, err := c.ossClient.Bucket(c.bucketName)
	if err != nil {
		return "", fmt.Errorf("Error getting bucket: %#v", err)
	}

	// NOTE(review): info is marshaled before ID and Path are assigned below,
	// so the stored lock record may lack them — verify this is intended.
	infoJson, err := json.Marshal(info)
	if err != nil {
		return "", err
	}

	if info.ID == "" {
		lockID, err := uuid.GenerateUUID()
		if err != nil {
			return "", err
		}
		info.ID = lockID
	}

	info.Path = c.lockFile

	exist, err := bucket.IsObjectExist(info.Path)
	if err != nil {
		return "", fmt.Errorf("Error checking object %s: %#v", info.Path, err)
	}
	if !exist {
		// No lock object yet: writing it claims the lock.
		if err := c.putObj(info.Path, infoJson); err != nil {
			return "", err
		}
	} else if _, err := c.validLock(info.ID); err != nil {
		// A lock object exists; only proceed if it already belongs to this ID.
		return "", err
	}

	return info.ID, nil
}
// Unlock releases the OSS-based lock after validating that id matches the
// current lock-info object; it is a no-op when locking is disabled.
func (c *RemoteClient) Unlock(id string) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.doLock {
		return nil
	}

	lockInfo, err := c.validLock(id)
	if err != nil {
		return err
	}

	// Deleting the lock object releases the lock.
	if err := c.deleteObj(c.lockFile); err != nil {
		return &state.LockError{
			Info: lockInfo,
			Err:  err,
		}
	}

	return nil
}
func (c *RemoteClient) putObj(key string, data []byte) error {
bucket, err := c.ossClient.Bucket(c.bucketName)
if err != nil {
return fmt.Errorf("Error getting bucket: %#v", err)
}
body := bytes.NewReader(data)
var options []oss.Option
@ -146,28 +120,321 @@ func (c *RemoteClient) putObj(key string, data []byte) error {
options = append(options, oss.ContentLength(int64(len(data))))
if body != nil {
if err := bucket.PutObject(key, body, options...); err != nil {
return fmt.Errorf("failed to upload %s: %#v", key, err)
if err := bucket.PutObject(c.stateFile, body, options...); err != nil {
return fmt.Errorf("Failed to upload state %s: %#v", c.stateFile, err)
}
return nil
}
sum := md5.Sum(data)
if err := c.putMD5(sum[:]); err != nil {
// if this errors out, we unfortunately have to error out altogether,
// since the next Get will inevitably fail.
return fmt.Errorf("Failed to store state MD5: %s", err)
}
return nil
}
func (c *RemoteClient) getObj(key string) (*bytes.Buffer, error) {
func (c *RemoteClient) Delete() error {
bucket, err := c.ossClient.Bucket(c.bucketName)
if err != nil {
return nil, fmt.Errorf("Error getting bucket: %#v", err)
return fmt.Errorf("Error getting bucket %s: %#v", c.bucketName, err)
}
if exist, err := bucket.IsObjectExist(key); err != nil {
return nil, fmt.Errorf("Estimating object %s is exist got an error: %#v", key, err)
log.Printf("[DEBUG] Deleting remote state from OSS: %#v", c.stateFile)
if err := bucket.DeleteObject(c.stateFile); err != nil {
return fmt.Errorf("Error deleting state %s: %#v", c.stateFile, err)
}
if err := c.deleteMD5(); err != nil {
log.Printf("[WARN] Error deleting state MD5: %s", err)
}
return nil
}
// Lock attempts to acquire the state lock by atomically inserting a row into
// the TableStore lock table (PutRow with an expect-not-exist condition). It is
// a no-op returning an empty ID when no lock table is configured. On
// contention it returns a *state.LockError describing the current holder.
//
// Fixes: "Recoring" typo in the debug log, and the lock-info failure path
// previously logged the outer err instead of infoErr.
func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
	if c.otsTable == "" {
		return "", nil
	}

	if info.ID == "" {
		lockID, err := uuid.GenerateUUID()
		if err != nil {
			return "", err
		}
		info.ID = lockID
	}

	putParams := &tablestore.PutRowChange{
		TableName: c.otsTable,
		PrimaryKey: &tablestore.PrimaryKey{
			PrimaryKeys: []*tablestore.PrimaryKeyColumn{
				{
					ColumnName: c.otsTabkePK.PKName,
					Value:      c.getPKValue(),
				},
			},
		},
		Columns: []tablestore.AttributeColumn{
			{
				ColumnName: "LockID",
				Value:      c.lockFile,
			},
			{
				ColumnName: "Info",
				Value:      string(info.Marshal()),
			},
		},
		Condition: &tablestore.RowCondition{
			// The insert only succeeds when no lock row exists yet, making
			// acquisition atomic.
			RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_NOT_EXIST,
		},
	}

	log.Printf("[DEBUG] Recording state lock in tablestore: %#v", putParams)

	_, err := c.otsClient.PutRow(&tablestore.PutRowRequest{
		PutRowChange: putParams,
	})
	if err != nil {
		// Someone else likely holds the lock; attach their lock info to the error.
		log.Printf("[WARN] Error storing state lock in tablestore: %#v", err)
		lockInfo, infoErr := c.getLockInfo()
		if infoErr != nil {
			log.Printf("[WARN] Error getting lock info: %#v", infoErr)
			err = multierror.Append(err, infoErr)
		}
		lockErr := &state.LockError{
			Err:  err,
			Info: lockInfo,
		}
		log.Printf("[WARN] state lock error: %#v", lockErr)
		return "", lockErr
	}

	return info.ID, nil
}
// getMD5 reads the MD5 digest of the most recently written state from the
// "Digest" column of the lock table. It returns (nil, nil) when no lock table
// is configured, and an error when the stored value is missing or is not a
// valid MD5 hex string.
func (c *RemoteClient) getMD5() ([]byte, error) {
	if c.otsTable == "" {
		return nil, nil
	}

	getParams := &tablestore.SingleRowQueryCriteria{
		TableName: c.otsTable,
		PrimaryKey: &tablestore.PrimaryKey{
			PrimaryKeys: []*tablestore.PrimaryKeyColumn{
				{
					ColumnName: c.otsTabkePK.PKName,
					Value:      c.getPKValue(),
				},
			},
		},
		ColumnsToGet: []string{"LockID", "Digest"},
		MaxVersion:   1,
	}

	log.Printf("[DEBUG] Retrieving state serial in tablestore: %#v", getParams)

	object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{
		SingleRowQueryCriteria: getParams,
	})
	if err != nil {
		return nil, err
	}

	var val string
	if v, ok := object.GetColumnMap().Columns["Digest"]; ok && len(v) > 0 {
		// MaxVersion is 1, so only the first version is present.
		val = v[0].Value.(string)
	}

	sum, err := hex.DecodeString(val)
	if err != nil || len(sum) != md5.Size {
		// NOTE(review): an absent digest also lands here ("" decodes to a
		// zero-length sum), so callers see "invalid md5" rather than nil.
		return nil, errors.New("invalid md5")
	}

	return sum, nil
}
// putMD5 records the MD5 digest of the just-written state in the "Digest"
// column of the lock table so that later reads can detect stale state files.
// A storage failure is logged as a warning but not returned; only an invalid
// digest length produces an error. No-op when no lock table is configured.
//
// Fixes the "Recoring" typo in the debug log.
func (c *RemoteClient) putMD5(sum []byte) error {
	if c.otsTable == "" {
		return nil
	}

	if len(sum) != md5.Size {
		return errors.New("invalid payload md5")
	}

	putParams := &tablestore.PutRowChange{
		TableName: c.otsTable,
		PrimaryKey: &tablestore.PrimaryKey{
			PrimaryKeys: []*tablestore.PrimaryKeyColumn{
				{
					ColumnName: c.otsTabkePK.PKName,
					Value:      c.getPKValue(),
				},
			},
		},
		Columns: []tablestore.AttributeColumn{
			{
				ColumnName: "LockID",
				Value:      c.lockPath() + stateIDSuffix,
			},
			{
				ColumnName: "Digest",
				Value:      hex.EncodeToString(sum),
			},
		},
		Condition: &tablestore.RowCondition{
			// NOTE(review): EXPECT_NOT_EXIST makes this insert fail whenever a
			// row with this primary key already exists (i.e. on every state
			// update after the first); an overwrite-tolerant condition may be
			// intended — verify against the TableStore SDK semantics.
			RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_NOT_EXIST,
		},
	}

	log.Printf("[DEBUG] Recording state serial in tablestore: %#v", putParams)

	_, err := c.otsClient.PutRow(&tablestore.PutRowRequest{
		PutRowChange: putParams,
	})
	if err != nil {
		// Best-effort: the state itself was already written, so only warn.
		log.Printf("[WARN] failed to record state serial in tablestore: %s", err)
	}

	return nil
}
// deleteMD5 removes the stored state-digest row from the lock table after a
// state is deleted. It is a no-op when no lock table is configured.
func (c *RemoteClient) deleteMD5() error {
	if c.otsTable == "" {
		return nil
	}

	pk := &tablestore.PrimaryKey{
		PrimaryKeys: []*tablestore.PrimaryKeyColumn{
			{ColumnName: c.otsTabkePK.PKName, Value: c.getPKValue()},
		},
	}
	params := &tablestore.DeleteRowRequest{
		DeleteRowChange: &tablestore.DeleteRowChange{
			TableName:  c.otsTable,
			PrimaryKey: pk,
			Condition: &tablestore.RowCondition{
				// Deleting a row that does not exist is treated as an error.
				RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_EXIST,
			},
		},
	}

	log.Printf("[DEBUG] Deleting state serial in tablestore: %#v", params)

	_, err := c.otsClient.DeleteRow(params)
	return err
}
// getLockInfo reads the current lock row from the lock table and unmarshals
// its "Info" column into a state.LockInfo. It returns an error when the row
// cannot be read or the payload is not valid JSON (including the absent-row
// case, where the payload is empty).
func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
	getParams := &tablestore.SingleRowQueryCriteria{
		TableName: c.otsTable,
		PrimaryKey: &tablestore.PrimaryKey{
			PrimaryKeys: []*tablestore.PrimaryKeyColumn{
				{
					ColumnName: c.otsTabkePK.PKName,
					Value:      c.getPKValue(),
				},
			},
		},
		ColumnsToGet: []string{"LockID", "Info"},
		MaxVersion:   1,
	}

	log.Printf("[DEBUG] Retrieving state lock info from tablestore: %#v", getParams)

	object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{
		SingleRowQueryCriteria: getParams,
	})
	if err != nil {
		return nil, err
	}

	var infoData string
	if v, ok := object.GetColumnMap().Columns["Info"]; ok && len(v) > 0 {
		// MaxVersion is 1, so only the first version is present.
		infoData = v[0].Value.(string)
	}

	lockInfo := &state.LockInfo{}
	err = json.Unmarshal([]byte(infoData), lockInfo)
	if err != nil {
		return nil, err
	}

	return lockInfo, nil
}
// Unlock releases the table-based state lock: it verifies that id matches the
// lock currently recorded in the table, then deletes the lock row. It is a
// no-op when no lock table is configured. Failures are returned wrapped in a
// *state.LockError carrying the current lock info where available.
func (c *RemoteClient) Unlock(id string) error {
	if c.otsTable == "" {
		return nil
	}

	lockErr := &state.LockError{}

	// Fetch the current holder so the caller can see who owns the lock.
	lockInfo, err := c.getLockInfo()
	if err != nil {
		lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err)
		return lockErr
	}
	lockErr.Info = lockInfo

	if lockInfo.ID != id {
		lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id)
		return lockErr
	}

	params := &tablestore.DeleteRowRequest{
		DeleteRowChange: &tablestore.DeleteRowChange{
			TableName: c.otsTable,
			PrimaryKey: &tablestore.PrimaryKey{
				PrimaryKeys: []*tablestore.PrimaryKeyColumn{
					{
						ColumnName: c.otsTabkePK.PKName,
						Value:      c.getPKValue(),
					},
				},
			},
			Condition: &tablestore.RowCondition{
				RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_EXIST,
			},
		},
	}

	log.Printf("[DEBUG] Deleting state lock from tablestore: %#v", params)

	_, err = c.otsClient.DeleteRow(params)
	if err != nil {
		lockErr.Err = err
		return lockErr
	}

	return nil
}
// lockPath returns the "<bucket>/<state-file>" identifier recorded in the
// lock table's LockID column for this client's state.
func (c *RemoteClient) lockPath() string {
	return c.bucketName + "/" + c.stateFile
}
func (c *RemoteClient) getObj() (*remote.Payload, error) {
bucket, err := c.ossClient.Bucket(c.bucketName)
if err != nil {
return nil, fmt.Errorf("Error getting bucket %s: %#v", c.bucketName, err)
}
if exist, err := bucket.IsObjectExist(c.stateFile); err != nil {
return nil, fmt.Errorf("Estimating object %s is exist got an error: %#v", c.stateFile, err)
} else if !exist {
return nil, nil
}
var options []oss.Option
output, err := bucket.GetObject(key, options...)
output, err := bucket.GetObject(c.stateFile, options...)
if err != nil {
return nil, fmt.Errorf("Error getting object: %#v", err)
}
@ -176,51 +443,42 @@ func (c *RemoteClient) getObj(key string) (*bytes.Buffer, error) {
if _, err := io.Copy(buf, output); err != nil {
return nil, fmt.Errorf("Failed to read remote state: %s", err)
}
return buf, nil
sum := md5.Sum(buf.Bytes())
payload := &remote.Payload{
Data: buf.Bytes(),
MD5: sum[:],
}
func (c *RemoteClient) deleteObj(key string) error {
bucket, err := c.ossClient.Bucket(c.bucketName)
if err != nil {
return fmt.Errorf("Error getting bucket: %#v", err)
}
if err := bucket.DeleteObject(key); err != nil {
return fmt.Errorf("Error deleting object %s: %#v", key, err)
}
return nil
}
// lockInfo reads the lock file, parses its contents and returns the parsed
// LockInfo struct.
func (c *RemoteClient) lockInfo() (*state.LockInfo, error) {
buf, err := c.getObj(c.lockFile)
if err != nil {
return nil, err
}
if buf == nil || len(buf.Bytes()) == 0 {
// If there was no data, then return nil
if len(payload.Data) == 0 {
return nil, nil
}
info := &state.LockInfo{}
if err := json.Unmarshal(buf.Bytes(), info); err != nil {
return nil, err
return payload, nil
}
return info, nil
// getPKValue converts statePKValue to match the lock table's primary-key
// type: hashed to an int for "Integer" keys, binary-string encoded for
// "Binary" keys, and the raw string for anything else.
func (c *RemoteClient) getPKValue() (value interface{}) {
	switch c.otsTabkePK.PKType {
	case "Integer":
		value = hashcode.String(statePKValue)
	case "Binary":
		value = stringToBin(statePKValue)
	default:
		value = statePKValue
	}
	return
}
func (c *RemoteClient) validLock(id string) (*state.LockInfo, *state.LockError) {
lockErr := &state.LockError{}
lockInfo, err := c.lockInfo()
if err != nil {
lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err)
return nil, lockErr
// stringToBin encodes s as the concatenation of each rune's unpadded base-2
// representation, e.g. "ab" -> "11000011100010". It is used to derive a
// Binary primary-key value from statePKValue.
func stringToBin(s string) (binString string) {
	// Build incrementally with strings.Builder instead of the original
	// fmt.Sprintf-in-a-loop, which re-copied the whole string each iteration
	// (quadratic in the output length).
	var b strings.Builder
	for _, r := range s {
		b.WriteString(strconv.FormatInt(int64(r), 2))
	}
	return b.String()
}
lockErr.Info = lockInfo
if lockInfo.ID != id {
lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id)
return nil, lockErr
}
return lockInfo, nil
}
// errBadChecksumFmt is reported when the state read from OSS does not match
// the MD5 digest recorded in TableStore; the %x verb receives the locally
// computed digest of the fetched state.
const errBadChecksumFmt = `state data in OSS does not have the expected content.
This may be caused by unusually long delays in OSS processing a previous state
update. Please wait for a minute or two and try again. If this problem
persists, and neither OSS nor TableStore are experiencing an outage, you may need
to manually verify the remote state and update the Digest value stored in the
TableStore table to the following value: %x
`

View File

@ -2,14 +2,21 @@ package oss
import (
"fmt"
"strings"
"testing"
"time"
"bytes"
"crypto/md5"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/states/statefile"
)
// NOTE: Before running this testcase, please create a OTS instance called 'tf-oss-remote'
var RemoteTestUsedOTSEndpoint = "https://tf-oss-remote.cn-hangzhou.ots.aliyuncs.com"
func TestRemoteClient_impl(t *testing.T) {
var _ remote.Client = new(RemoteClient)
var _ remote.ClientLocker = new(RemoteClient)
@ -17,12 +24,12 @@ func TestRemoteClient_impl(t *testing.T) {
func TestRemoteClient(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("terraform-remote-oss-test-%x", time.Now().Unix())
bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
path := "testState"
b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
"bucket": bucketName,
"path": path,
"prefix": path,
"encrypt": true,
})).(*Backend)
@ -37,76 +44,287 @@ func TestRemoteClient(t *testing.T) {
remote.TestClient(t, state.(*remote.State).Client)
}
func TestOSS_stateLock(t *testing.T) {
func TestRemoteClientLocks(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("terraform-remote-oss-test-%x", time.Now().Unix())
bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
path := "testState"
b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
"bucket": bucketName,
"path": path,
"prefix": path,
"encrypt": true,
"tablestore_table": tableName,
"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
})).(*Backend)
b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
"bucket": bucketName,
"path": path,
"prefix": path,
"encrypt": true,
"tablestore_table": tableName,
"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
})).(*Backend)
createOSSBucket(t, b1.ossClient, bucketName)
defer deleteOSSBucket(t, b1.ossClient, bucketName)
createTablestoreTable(t, b1.otsClient, tableName)
defer deleteTablestoreTable(t, b1.otsClient, tableName)
s1, err := b1.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatalf("err: %s", err)
t.Fatal(err)
}
s2, err := b2.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatalf("err: %s", err)
t.Fatal(err)
}
remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
}
// verify that we can unlock a state with an existing lock
func TestOSS_destroyLock(t *testing.T) {
func TestRemoteForceUnlock(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("terraform-remote-oss-test-%x", time.Now().Unix())
bucketName := fmt.Sprintf("tf-remote-oss-test-force-%x", time.Now().Unix())
tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
path := "testState"
b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
"bucket": bucketName,
"prefix": path,
"encrypt": true,
"tablestore_table": tableName,
"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
})).(*Backend)
b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
"bucket": bucketName,
"prefix": path,
"encrypt": true,
"tablestore_table": tableName,
"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
})).(*Backend)
createOSSBucket(t, b1.ossClient, bucketName)
defer deleteOSSBucket(t, b1.ossClient, bucketName)
createTablestoreTable(t, b1.otsClient, tableName)
defer deleteTablestoreTable(t, b1.otsClient, tableName)
// first test with default
s1, err := b1.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
info := state.NewLockInfo()
info.Operation = "test"
info.Who = "clientA"
lockID, err := s1.Lock(info)
if err != nil {
t.Fatal("unable to get initial lock:", err)
}
// s1 is now locked, get the same state through s2 and unlock it
s2, err := b2.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatal("failed to get default state to force unlock:", err)
}
if err := s2.Unlock(lockID); err != nil {
t.Fatal("failed to force-unlock default state")
}
// now try the same thing with a named state
// first test with default
s1, err = b1.StateMgr("test")
if err != nil {
t.Fatal(err)
}
info = state.NewLockInfo()
info.Operation = "test"
info.Who = "clientA"
lockID, err = s1.Lock(info)
if err != nil {
t.Fatal("unable to get initial lock:", err)
}
// s1 is now locked, get the same state through s2 and unlock it
s2, err = b2.StateMgr("test")
if err != nil {
t.Fatal("failed to get named state to force unlock:", err)
}
if err = s2.Unlock(lockID); err != nil {
t.Fatal("failed to force-unlock named state")
}
}
func TestRemoteClient_clientMD5(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
path := "testState"
b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
"bucket": bucketName,
"path": path,
"encrypt": true,
"prefix": path,
"tablestore_table": tableName,
"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
})).(*Backend)
createOSSBucket(t, b.ossClient, bucketName)
defer deleteOSSBucket(t, b.ossClient, bucketName)
createTablestoreTable(t, b.otsClient, tableName)
defer deleteTablestoreTable(t, b.otsClient, tableName)
s, err := b.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatalf("err: %s", err)
t.Fatal(err)
}
client := s.(*remote.State).Client.(*RemoteClient)
sum := md5.Sum([]byte("test"))
if err := client.putMD5(sum[:]); err != nil {
t.Fatal(err)
}
c := s.(*remote.State).Client.(*RemoteClient)
info := state.NewLockInfo()
id, err := c.Lock(info)
getSum, err := client.getMD5()
if err != nil {
t.Fatalf("err: %s", err)
t.Fatal(err)
}
if err := c.Unlock(id); err != nil {
t.Fatalf("err: %s", err)
if !bytes.Equal(getSum, sum[:]) {
t.Fatalf("getMD5 returned the wrong checksum: expected %x, got %x", sum[:], getSum)
}
res, err := c.getObj(c.lockFile)
if err := client.deleteMD5(); err != nil {
t.Fatal(err)
}
if getSum, err := client.getMD5(); err == nil {
t.Fatalf("expected getMD5 error, got none. checksum: %x", getSum)
}
}
// verify that a client won't return a state with an incorrect checksum.
func TestRemoteClient_stateChecksum(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
path := "testState"
b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
"bucket": bucketName,
"prefix": path,
"tablestore_table": tableName,
"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
})).(*Backend)
createOSSBucket(t, b1.ossClient, bucketName)
defer deleteOSSBucket(t, b1.ossClient, bucketName)
createTablestoreTable(t, b1.otsClient, tableName)
defer deleteTablestoreTable(t, b1.otsClient, tableName)
s1, err := b1.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatalf("err: %s", err)
t.Fatal(err)
}
if res != nil && res.String() != "" {
t.Fatalf("lock key not cleaned up at: %s", string(c.stateFile))
client1 := s1.(*remote.State).Client
// create an old and new state version to persist
s := state.TestStateInitial()
sf := &statefile.File{State: s}
var oldState bytes.Buffer
if err := statefile.Write(sf, &oldState); err != nil {
t.Fatal(err)
}
sf.Serial++
var newState bytes.Buffer
if err := statefile.Write(sf, &newState); err != nil {
t.Fatal(err)
}
// Use b2 without a tablestore_table to bypass the lock table to write the state directly.
// client2 will write the "incorrect" state, simulating oss eventually consistency delays
b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
"bucket": bucketName,
"prefix": path,
})).(*Backend)
s2, err := b2.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
client2 := s2.(*remote.State).Client
// write the new state through client2 so that there is no checksum yet
if err := client2.Put(newState.Bytes()); err != nil {
t.Fatal(err)
}
// verify that we can pull a state without a checksum
if _, err := client1.Get(); err != nil {
t.Fatal(err)
}
// write the new state back with its checksum
if err := client1.Put(newState.Bytes()); err != nil {
t.Fatal(err)
}
// put an empty state in place to check for panics during get
if err := client2.Put([]byte{}); err != nil {
t.Fatal(err)
}
// remove the timeouts so we can fail immediately
origTimeout := consistencyRetryTimeout
origInterval := consistencyRetryPollInterval
defer func() {
consistencyRetryTimeout = origTimeout
consistencyRetryPollInterval = origInterval
}()
consistencyRetryTimeout = 0
consistencyRetryPollInterval = 0
// fetching an empty state through client1 should now error out due to a
// mismatched checksum.
if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) {
t.Fatalf("expected state checksum error: got %s", err)
}
// put the old state in place of the new, without updating the checksum
if err := client2.Put(oldState.Bytes()); err != nil {
t.Fatal(err)
}
// fetching the wrong state through client1 should now error out due to a
// mismatched checksum.
if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) {
t.Fatalf("expected state checksum error: got %s", err)
}
// update the state with the correct one after we Get again
testChecksumHook = func() {
if err := client2.Put(newState.Bytes()); err != nil {
t.Fatal(err)
}
testChecksumHook = nil
}
consistencyRetryTimeout = origTimeout
// this final Get will fail to fail the checksum verification, the above
// callback will update the state with the correct version, and Get should
// retry automatically.
if _, err := client1.Get(); err != nil {
t.Fatal(err)
}
}

View File

@ -4,6 +4,14 @@ package config
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[ManagedResourceMode-0]
_ = x[DataResourceMode-1]
}
const _ResourceMode_name = "ManagedResourceModeDataResourceMode"
var _ResourceMode_index = [...]uint8{0, 19, 35}

View File

@ -4,6 +4,15 @@ package configs
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[ProvisionerOnFailureInvalid-0]
_ = x[ProvisionerOnFailureContinue-1]
_ = x[ProvisionerOnFailureFail-2]
}
const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail"
var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79}

View File

@ -4,6 +4,15 @@ package configs
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[ProvisionerWhenInvalid-0]
_ = x[ProvisionerWhenCreate-1]
_ = x[ProvisionerWhenDestroy-2]
}
const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy"
var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65}

View File

@ -4,6 +4,16 @@ package configs
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[TypeHintNone-0]
_ = x[TypeHintString-83]
_ = x[TypeHintList-76]
_ = x[TypeHintMap-77]
}
const (
_VariableTypeHint_name_0 = "TypeHintNone"
_VariableTypeHint_name_1 = "TypeHintListTypeHintMap"

7
go.mod
View File

@ -8,14 +8,16 @@ require (
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af // indirect
github.com/agext/levenshtein v1.2.2
github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6 // indirect
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190104080739-ef2ef6084d8f
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70
github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible
github.com/apparentlymart/go-cidr v1.0.0
github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0
github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect
github.com/armon/go-radix v1.0.0 // indirect
github.com/aws/aws-sdk-go v1.16.36
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect
github.com/blang/semver v3.5.1+incompatible
github.com/boltdb/bolt v1.3.1 // indirect
github.com/chzyer/logex v1.1.10 // indirect
@ -97,7 +99,7 @@ require (
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c // indirect
github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17 // indirect
github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17
github.com/posener/complete v1.2.1
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/sirupsen/logrus v1.1.1 // indirect
@ -121,6 +123,7 @@ require (
golang.org/x/oauth2 v0.0.0-20190220154721-9b3c75971fc9
google.golang.org/api v0.1.0
google.golang.org/grpc v1.18.0
gopkg.in/ini.v1 v1.42.0 // indirect
gopkg.in/vmihailenco/msgpack.v2 v2.9.1 // indirect
labix.org/v2/mgo v0.0.0-20140701140051-000000000287 // indirect
launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect

10
go.sum
View File

@ -28,10 +28,12 @@ github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXva
github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6 h1:LoeFxdq5zUCBQPhbQKE6zvoGwHMxCBlqwbH9+9kHoHA=
github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190104080739-ef2ef6084d8f h1:pM8wn2zKfEVQkR9cj//GkywiJXMwtZ9feuNsEkHqBC8=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190104080739-ef2ef6084d8f/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a h1:APorzFpCcv6wtD5vmRWYqNm4N55kbepL7c7kTq9XI6A=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70 h1:FrF4uxA24DF3ARNXVbUin3wa5fDLaB1Cy8mKks/LRz4=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible h1:ABQ7FF+IxSFHDMOTtjCfmMDMHiCq6EsAoCV/9sFinaM=
github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible/go.mod h1:LDQHRZylxvcg8H7wBIDfvO5g/cy4/sz1iucBlc2l3Jw=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e h1:ptBAamGVd6CfRsUtyHD+goy2JGhv1QC32v3gqM8mYAM=
github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
@ -55,6 +57,8 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI
github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
github.com/aws/aws-sdk-go v1.16.36 h1:POeH34ZME++pr7GBGh+ZO6Y5kOwSMQpqp5BGUgooJ6k=
github.com/aws/aws-sdk-go v1.16.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
@ -514,6 +518,8 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk=
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/vmihailenco/msgpack.v2 v2.9.1 h1:kb0VV7NuIojvRfzwslQeP3yArBqJHW9tOl4t38VS1jM=
gopkg.in/vmihailenco/msgpack.v2 v2.9.1/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn+AEGwNEOatn8=

View File

@ -4,6 +4,18 @@ package schema
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[getSourceState-1]
_ = x[getSourceConfig-2]
_ = x[getSourceDiff-4]
_ = x[getSourceSet-8]
_ = x[getSourceExact-16]
_ = x[getSourceLevelMask-15]
}
const (
_getSource_name_0 = "getSourceStategetSourceConfig"
_getSource_name_1 = "getSourceDiff"

View File

@ -4,6 +4,21 @@ package schema
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[TypeInvalid-0]
_ = x[TypeBool-1]
_ = x[TypeInt-2]
_ = x[TypeFloat-3]
_ = x[TypeString-4]
_ = x[TypeList-5]
_ = x[TypeMap-6]
_ = x[TypeSet-7]
_ = x[typeObject-8]
}
const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject"
var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77}

View File

@ -4,6 +4,19 @@ package plans
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[NoOp-0]
_ = x[Create-43]
_ = x[Read-8592]
_ = x[Update-126]
_ = x[DeleteThenCreate-8723]
_ = x[CreateThenDelete-177]
_ = x[Delete-45]
}
const (
_Action_name_0 = "NoOp"
_Action_name_1 = "Create"

View File

@ -4,6 +4,15 @@ package states
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[NoEach-0]
_ = x[EachList-76]
_ = x[EachMap-77]
}
const (
_EachMode_name_0 = "NoEach"
_EachMode_name_1 = "EachListEachMap"

View File

@ -4,6 +4,15 @@ package states
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[ObjectReady-82]
_ = x[ObjectTainted-84]
_ = x[ObjectPlanned-80]
}
const (
_ObjectStatus_name_0 = "ObjectPlanned"
_ObjectStatus_name_1 = "ObjectReady"

View File

@ -4,6 +4,17 @@ package statemgr
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[SnapshotOlder-60]
_ = x[SnapshotNewer-62]
_ = x[SnapshotEqual-61]
_ = x[SnapshotUnrelated-33]
_ = x[SnapshotLegacy-63]
}
const (
_SnapshotMetaRel_name_0 = "SnapshotUnrelated"
_SnapshotMetaRel_name_1 = "SnapshotOlderSnapshotEqualSnapshotNewerSnapshotLegacy"

View File

@ -4,6 +4,20 @@ package terraform
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[GraphTypeInvalid-0]
_ = x[GraphTypeLegacy-1]
_ = x[GraphTypeRefresh-2]
_ = x[GraphTypePlan-3]
_ = x[GraphTypePlanDestroy-4]
_ = x[GraphTypeApply-5]
_ = x[GraphTypeValidate-6]
_ = x[GraphTypeEval-7]
}
const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeValidateGraphTypeEval"
var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 111, 124}

View File

@ -4,6 +4,16 @@ package terraform
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[TypeInvalid-0]
_ = x[TypePrimary-1]
_ = x[TypeTainted-2]
_ = x[TypeDeposed-3]
}
const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed"
var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44}

View File

@ -4,6 +4,21 @@ package terraform
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[ValueFromUnknown-0]
_ = x[ValueFromConfig-67]
_ = x[ValueFromAutoFile-70]
_ = x[ValueFromNamedFile-78]
_ = x[ValueFromCLIArg-65]
_ = x[ValueFromEnvVar-69]
_ = x[ValueFromInput-73]
_ = x[ValueFromPlan-80]
_ = x[ValueFromCaller-83]
}
const (
_ValueSourceType_name_0 = "ValueFromUnknown"
_ValueSourceType_name_1 = "ValueFromCLIArg"

View File

@ -4,6 +4,21 @@ package terraform
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[walkInvalid-0]
_ = x[walkApply-1]
_ = x[walkPlan-2]
_ = x[walkPlanDestroy-3]
_ = x[walkRefresh-4]
_ = x[walkValidate-5]
_ = x[walkDestroy-6]
_ = x[walkImport-7]
_ = x[walkEval-8]
}
const _walkOperation_name = "walkInvalidwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImportwalkEval"
var _walkOperation_index = [...]uint8{0, 11, 20, 28, 43, 54, 66, 77, 87, 95}

View File

@ -4,6 +4,14 @@ package tfdiags
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[Error-69]
_ = x[Warning-87]
}
const (
_Severity_name_0 = "Error"
_Severity_name_1 = "Warning"

View File

@ -0,0 +1,12 @@
package credentials
// BearerTokenCredential carries a pre-issued bearer token used to
// authenticate API requests without an access key pair.
type BearerTokenCredential struct {
	BearerToken string
}

// NewBearerTokenCredential wraps the given token in a BearerTokenCredential.
func NewBearerTokenCredential(token string) *BearerTokenCredential {
	cred := new(BearerTokenCredential)
	cred.BearerToken = token
	return cred
}

View File

@ -1,17 +1,5 @@
package credentials
// Deprecated: Use EcsRamRoleCredential in this package instead.
type StsRoleNameOnEcsCredential struct {
RoleName string
}
// Deprecated: Use NewEcsRamRoleCredential in this package instead.
func NewStsRoleNameOnEcsCredential(roleName string) *StsRoleNameOnEcsCredential {
return &StsRoleNameOnEcsCredential{
RoleName: roleName,
}
}
func (oldCred *StsRoleNameOnEcsCredential) ToEcsRamRoleCredential() *EcsRamRoleCredential {
return &EcsRamRoleCredential{
RoleName: oldCred.RoleName,
@ -27,3 +15,15 @@ func NewEcsRamRoleCredential(roleName string) *EcsRamRoleCredential {
RoleName: roleName,
}
}
// Deprecated: Use EcsRamRoleCredential in this package instead.
type StsRoleNameOnEcsCredential struct {
RoleName string
}
// Deprecated: Use NewEcsRamRoleCredential in this package instead.
func NewStsRoleNameOnEcsCredential(roleName string) *StsRoleNameOnEcsCredential {
return &StsRoleNameOnEcsCredential{
RoleName: roleName,
}
}

View File

@ -0,0 +1,30 @@
package provider
import (
"errors"
"os"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
)
// EnvProvider resolves credentials from the ALIBABA_CLOUD_ACCESS_KEY_ID and
// ALIBABA_CLOUD_ACCESS_KEY_SECRET environment variables.
type EnvProvider struct{}

// ProviderEnv is the shared EnvProvider instance used by the default chain.
var ProviderEnv = new(EnvProvider)

// NewEnvProvider constructs an EnvProvider.
func NewEnvProvider() Provider {
	return &EnvProvider{}
}

// Resolve returns (nil, nil) when either variable is unset (so the chain can
// try the next provider), an error when one is set but empty, and an
// access-key credential otherwise.
func (p *EnvProvider) Resolve() (auth.Credential, error) {
	id, idSet := os.LookupEnv(ENVAccessKeyID)
	secret, secretSet := os.LookupEnv(ENVAccessKeySecret)
	switch {
	case !idSet || !secretSet:
		return nil, nil
	case id == "" || secret == "":
		return nil, errors.New("Environmental variable (ALIBABACLOUD_ACCESS_KEY_ID or ALIBABACLOUD_ACCESS_KEY_SECRET) is empty")
	}
	return credentials.NewAccessKeyCredential(id, secret), nil
}

View File

@ -0,0 +1,92 @@
package provider
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
)
// securityCredURL is the ECS instance metadata endpoint that serves the STS
// credentials for a RAM role attached to the instance.
var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/"

// InstanceCredentialsProvider resolves credentials from the ECS instance
// metadata service, using the role name named by ALIBABA_CLOUD_ECS_METADATA.
type InstanceCredentialsProvider struct{}

// ProviderInstance is the shared InstanceCredentialsProvider used by the
// default provider chain.
var ProviderInstance = new(InstanceCredentialsProvider)

// HookGet wraps the HTTP fetch used by Resolve; presumably a seam for
// stubbing the metadata call in tests — TODO confirm with callers.
var HookGet = func(fn func(string) (int, []byte, error)) func(string) (int, []byte, error) {
	return fn
}

// NewInstanceCredentialsProvider constructs an InstanceCredentialsProvider.
func NewInstanceCredentialsProvider() Provider {
	return &InstanceCredentialsProvider{}
}
// Resolve implements Provider. It returns (nil, nil) when the
// ALIBABA_CLOUD_ECS_METADATA variable is absent, and otherwise fetches the
// STS token of the named RAM role from the instance metadata endpoint.
func (p *InstanceCredentialsProvider) Resolve() (auth.Credential, error) {
	roleName, ok := os.LookupEnv(ENVEcsMetadata)
	if !ok {
		return nil, nil
	}
	if roleName == "" {
		return nil, errors.New("Environmental variable 'ALIBABA_CLOUD_ECS_METADATA' are empty")
	}

	status, content, err := HookGet(get)(securityCredURL + roleName)
	if err != nil {
		return nil, err
	}
	switch {
	case status == 404:
		return nil, fmt.Errorf("The role was not found in the instance")
	case status != 200:
		return nil, fmt.Errorf("Received %d when getting security credentials for %s", status, roleName)
	}

	body := make(map[string]interface{})
	if err := json.Unmarshal(content, &body); err != nil {
		return nil, err
	}

	accessKeyID, err := extractString(body, "AccessKeyId")
	if err != nil {
		return nil, err
	}
	accessKeySecret, err := extractString(body, "AccessKeySecret")
	if err != nil {
		return nil, err
	}
	securityToken, err := extractString(body, "SecurityToken")
	if err != nil {
		return nil, err
	}

	return credentials.NewStsTokenCredential(accessKeyID, accessKeySecret, securityToken), nil
}
// get performs a GET with a 1-second timeout and returns the HTTP status,
// the full response body, and any transport/read error.
func get(url string) (status int, content []byte, err error) {
	// BUG FIX: previously this set http.DefaultClient.Timeout, mutating the
	// process-wide shared client for every other user of net/http. Use a
	// dedicated client instead.
	httpClient := &http.Client{Timeout: time.Second}
	resp, err := httpClient.Get(url)
	if err != nil {
		return
	}
	defer resp.Body.Close()
	content, err = ioutil.ReadAll(resp.Body)
	return resp.StatusCode, content, err
}
// extractString looks up key in m and returns its value, which must be a
// string; it errors when the key is missing or holds a non-string value.
func extractString(m map[string]interface{}, key string) (string, error) {
	raw, ok := m[key]
	if !ok {
		return "", fmt.Errorf("%s not in map", key)
	}
	if str, ok := raw.(string); ok {
		return str, nil
	}
	return "", fmt.Errorf("%s is not a string in map", key)
}

View File

@ -0,0 +1,158 @@
package provider
import (
"bufio"
"errors"
"os"
"runtime"
"strings"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
ini "gopkg.in/ini.v1"
)
// ProfileProvider resolves credentials from one named section ("profile")
// of an ini-format credential file.
type ProfileProvider struct {
	Profile string
}

// ProviderProfile is the shared ProfileProvider (profile "default") used by
// the default provider chain.
var ProviderProfile = NewProfileProvider()

// NewProfileProvider accepts zero or more profile names. With no argument
// the profile is "default"; with several arguments only the first is used
// and the rest are discarded.
func NewProfileProvider(name ...string) Provider {
	profile := "default"
	if len(name) > 0 {
		profile = name[0]
	}
	return &ProfileProvider{Profile: profile}
}
// Resolve implements the Provider interface.
//
// The credential file path comes from the ALIBABA_CLOUD_CREDENTIALS_FILE
// environment variable when set, otherwise from the default location under
// the user's home directory. When the credential type is rsa_key_pair, the
// content of the private_key file must be able to be parsed directly into
// the required string that NewRsaKeyPairCredential needs.
func (p *ProfileProvider) Resolve() (auth.Credential, error) {
	path, ok := os.LookupEnv(ENVCredentialFile)
	if !ok {
		// BUG FIX: this previously declared a new `path` with `:=`,
		// shadowing the outer variable, so ini.Load below always received
		// an empty string when the env var was unset.
		defaultPath, err := checkDefaultPath()
		if err != nil {
			return nil, err
		}
		if defaultPath == "" {
			// no default credential file present: not configured
			return nil, nil
		}
		path = defaultPath
	} else if path == "" {
		return nil, errors.New("Environment variable '" + ENVCredentialFile + "' cannot be empty")
	}

	// renamed from `ini` to avoid shadowing the imported package name
	iniFile, err := ini.Load(path)
	if err != nil {
		return nil, errors.New("ERROR: Can not open file" + err.Error())
	}

	section, err := iniFile.GetSection(p.Profile)
	if err != nil {
		return nil, errors.New("ERROR: Can not load section" + err.Error())
	}

	value, err := section.GetKey("type")
	if err != nil {
		return nil, errors.New("ERROR: Can not find credential type" + err.Error())
	}

	switch value.String() {
	case "access_key":
		value1, err1 := section.GetKey("access_key_id")
		value2, err2 := section.GetKey("access_key_secret")
		if err1 != nil || err2 != nil {
			return nil, errors.New("ERROR: Failed to get value")
		}
		if value1.String() == "" || value2.String() == "" {
			return nil, errors.New("ERROR: Value can't be empty")
		}
		return credentials.NewAccessKeyCredential(value1.String(), value2.String()), nil
	case "ecs_ram_role":
		value1, err1 := section.GetKey("role_name")
		if err1 != nil {
			return nil, errors.New("ERROR: Failed to get value")
		}
		if value1.String() == "" {
			return nil, errors.New("ERROR: Value can't be empty")
		}
		return credentials.NewEcsRamRoleCredential(value1.String()), nil
	case "ram_role_arn":
		value1, err1 := section.GetKey("access_key_id")
		value2, err2 := section.GetKey("access_key_secret")
		value3, err3 := section.GetKey("role_arn")
		value4, err4 := section.GetKey("role_session_name")
		if err1 != nil || err2 != nil || err3 != nil || err4 != nil {
			return nil, errors.New("ERROR: Failed to get value")
		}
		if value1.String() == "" || value2.String() == "" || value3.String() == "" || value4.String() == "" {
			return nil, errors.New("ERROR: Value can't be empty")
		}
		return credentials.NewRamRoleArnCredential(value1.String(), value2.String(), value3.String(), value4.String(), 3600), nil
	case "rsa_key_pair":
		value1, err1 := section.GetKey("public_key_id")
		value2, err2 := section.GetKey("private_key_file")
		if err1 != nil || err2 != nil {
			return nil, errors.New("ERROR: Failed to get value")
		}
		if value1.String() == "" || value2.String() == "" {
			return nil, errors.New("ERROR: Value can't be empty")
		}
		file, err := os.Open(value2.String())
		if err != nil {
			return nil, errors.New("ERROR: Can not get private_key")
		}
		defer file.Close()
		// BUG FIX: the key material was previously collected into an unused
		// `data` variable while an always-empty `privateKey` was handed to
		// NewRsaKeyPairCredential. Accumulate into privateKey directly.
		var privateKey string
		scan := bufio.NewScanner(file)
		for scan.Scan() {
			// skip the "-----BEGIN/END ...-----" PEM armor lines
			if strings.HasPrefix(scan.Text(), "----") {
				continue
			}
			privateKey += scan.Text() + "\n"
		}
		return credentials.NewRsaKeyPairCredential(privateKey, value1.String(), 3600), nil
	default:
		return nil, errors.New("ERROR: Failed to get credential")
	}
}
// GetHomePath returns the current user's home directory as reported by the
// platform's conventional environment variable (USERPROFILE on Windows,
// HOME elsewhere). It returns "" when that variable is unset.
func GetHomePath() string {
	envKey := "HOME"
	if runtime.GOOS == "windows" {
		envKey = "USERPROFILE"
	}
	home, ok := os.LookupEnv(envKey)
	if !ok {
		return ""
	}
	return home
}

// checkDefaultPath resolves the default credential file location
// (~/.alibabacloud/credentials). It returns ("", nil) when the file cannot
// be stat'ed (treated as "not present"), and an error only when the home
// directory itself cannot be determined.
func checkDefaultPath() (string, error) {
	home := GetHomePath()
	if home == "" {
		return "", errors.New("The default credential file path is invalid")
	}
	candidate := strings.Replace("~/.alibabacloud/credentials", "~", home, 1)
	if _, statErr := os.Stat(candidate); statErr != nil {
		return "", nil
	}
	return candidate, nil
}

View File

@ -0,0 +1,19 @@
package provider
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
)
// Environment variables (and the default credential-file path) that may be
// consulted by the credential providers in this package.
const (
	ENVAccessKeyID     = "ALIBABA_CLOUD_ACCESS_KEY_ID"
	ENVAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET"
	ENVCredentialFile  = "ALIBABA_CLOUD_CREDENTIALS_FILE"
	ENVEcsMetadata     = "ALIBABA_CLOUD_ECS_METADATA"
	PATHCredentialFile = "~/.alibabacloud/credentials"
)

// Provider yields a credential from one source. Implementations return
// (nil, nil) when their source is not configured, so a chain can move on
// to the next provider. When you want to customize the provider, you only
// need to implement the method of the interface.
type Provider interface {
	Resolve() (auth.Credential, error)
}

View File

@ -0,0 +1,34 @@
package provider
import (
"errors"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
)
// ProviderChain resolves a credential by consulting a list of Providers in
// order and returning the first credential found.
type ProviderChain struct {
	Providers []Provider
}

// defaultproviders is the standard lookup order: environment variables,
// then the credential file, then ECS instance metadata.
var defaultproviders = []Provider{ProviderEnv, ProviderProfile, ProviderInstance}

// DefaultChain is the chain used when the caller does not build its own.
var DefaultChain = NewProviderChain(defaultproviders)

// NewProviderChain wraps the given providers into a single Provider.
func NewProviderChain(providers []Provider) Provider {
	return &ProviderChain{
		Providers: providers,
	}
}

// Resolve walks the chain: the first error aborts the walk, a (nil, nil)
// result ("not configured") moves on to the next provider, and the first
// non-nil credential is returned.
func (p *ProviderChain) Resolve() (auth.Credential, error) {
	for _, provider := range p.Providers {
		creds, err := provider.Resolve()
		if err != nil {
			return nil, err
		}
		// Simplified from `else if err == nil && creds == nil`: err is
		// provably nil here, and the nil error return is made explicit.
		if creds == nil {
			continue
		}
		return creds, nil
	}
	return nil, errors.New("No credential found")
}

View File

@ -15,6 +15,7 @@ type RamRoleArnCredential struct {
RoleArn string
RoleSessionName string
RoleSessionExpiration int
Policy string
}
// Deprecated: Use RamRoleArnCredential in this package instead.
@ -47,3 +48,14 @@ func NewRamRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionN
RoleSessionExpiration: roleSessionExpiration,
}
}
// NewRamRoleArnWithPolicyCredential builds a RamRoleArnCredential that also
// carries a policy document; the policy is forwarded as the "Policy"
// parameter of the STS AssumeRole call made by the signer.
func NewRamRoleArnWithPolicyCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string, roleSessionExpiration int) *RamRoleArnCredential {
	return &RamRoleArnCredential{
		AccessKeyId:           accessKeyId,
		AccessKeySecret:       accessKeySecret,
		RoleArn:               roleArn,
		RoleSessionName:       roleSessionName,
		RoleSessionExpiration: roleSessionExpiration,
		Policy:                policy,
	}
}

View File

@ -62,7 +62,10 @@ func completeROASignParams(request requests.AcsRequest, signer Signer, regionId
headerParams["x-acs-security-token"] = value
continue
}
if key == "BearerToken" {
headerParams["x-acs-bearer-token"] = value
continue
}
queryParams[key] = value
}
}

View File

@ -44,6 +44,10 @@ func NewSignerWithCredential(credential Credential, commonApi func(request *requ
{
signer = signers.NewStsTokenSigner(instance)
}
case *credentials.BearerTokenCredential:
{
signer = signers.NewBearerTokenSigner(instance)
}
case *credentials.RamRoleArnCredential:
{
signer, err = signers.NewRamRoleArnSigner(instance, commonApi)

View File

@ -34,6 +34,7 @@ func ShaHmac1(source, secret string) string {
}
func Sha256WithRsa(source, secret string) string {
// block, _ := pem.Decode([]byte(secret))
decodeString, err := base64.StdEncoding.DecodeString(secret)
if err != nil {
panic(err)

View File

@ -21,7 +21,7 @@ import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
const defaultInAdvanceScale = 0.8
const defaultInAdvanceScale = 0.95
type credentialUpdater struct {
credentialExpiration int

View File

@ -0,0 +1,35 @@
package signers
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
)
// BearerTokenSigner implements the SDK's signer interface for requests
// authenticated with a pre-issued bearer token instead of an access key.
type BearerTokenSigner struct {
	credential *credentials.BearerTokenCredential
}

// NewBearerTokenSigner builds a signer around the given bearer-token credential.
func NewBearerTokenSigner(credential *credentials.BearerTokenCredential) *BearerTokenSigner {
	return &BearerTokenSigner{
		credential: credential,
	}
}

// GetExtraParam exposes the token as the "BearerToken" request parameter
// (the ROA signer maps this key to the x-acs-bearer-token header).
func (signer *BearerTokenSigner) GetExtraParam() map[string]string {
	return map[string]string{"BearerToken": signer.credential.BearerToken}
}

// GetName returns "" — bearer-token requests carry no signature-method name.
func (*BearerTokenSigner) GetName() string {
	return ""
}

// GetType identifies the credential type as BEARERTOKEN.
func (*BearerTokenSigner) GetType() string {
	return "BEARERTOKEN"
}

// GetVersion returns the signature version string.
func (*BearerTokenSigner) GetVersion() string {
	return "1.0"
}

// GetAccessKeyId returns an empty id: bearer auth uses no access key.
func (signer *BearerTokenSigner) GetAccessKeyId() (accessKeyId string, err error) {
	return "", nil
}

// Sign returns "" — bearer-token requests are not request-signed; both
// parameters are intentionally unused.
func (signer *BearerTokenSigner) Sign(stringToSign, secretSuffix string) string {
	return ""
}

View File

@ -24,7 +24,7 @@ import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/jmespath/go-jmespath"
jmespath "github.com/jmespath/go-jmespath"
)
var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/"
@ -88,7 +88,7 @@ func (signer *EcsRamRoleSigner) GetExtraParam() map[string]string {
}
func (signer *EcsRamRoleSigner) Sign(stringToSign, secretSuffix string) string {
secret := signer.sessionCredential.AccessKeyId + secretSuffix
secret := signer.sessionCredential.AccessKeySecret + secretSuffix
return ShaHmac1(stringToSign, secret)
}

View File

@ -24,7 +24,7 @@ import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/jmespath/go-jmespath"
jmespath "github.com/jmespath/go-jmespath"
)
type SignerKeyPair struct {
@ -97,7 +97,7 @@ func (signer *SignerKeyPair) GetExtraParam() map[string]string {
}
func (signer *SignerKeyPair) Sign(stringToSign, secretSuffix string) string {
secret := signer.sessionCredential.AccessKeyId + secretSuffix
secret := signer.sessionCredential.AccessKeySecret + secretSuffix
return ShaHmac1(stringToSign, secret)
}
@ -107,6 +107,7 @@ func (signer *SignerKeyPair) buildCommonRequest() (request *requests.CommonReque
request.Version = "2015-04-01"
request.ApiName = "GenerateSessionAccessKey"
request.Scheme = requests.HTTPS
request.SetDomain("sts.ap-northeast-1.aliyuncs.com")
request.QueryParams["PublicKeyId"] = signer.credential.PublicKeyId
request.QueryParams["DurationSeconds"] = strconv.Itoa(signer.credentialExpiration)
return

View File

@ -25,7 +25,7 @@ import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/jmespath/go-jmespath"
jmespath "github.com/jmespath/go-jmespath"
)
const (
@ -119,6 +119,9 @@ func (signer *RamRoleArnSigner) buildCommonRequest() (request *requests.CommonRe
request.ApiName = "AssumeRole"
request.Scheme = requests.HTTPS
request.QueryParams["RoleArn"] = signer.credential.RoleArn
if signer.credential.Policy != "" {
request.QueryParams["Policy"] = signer.credential.Policy
}
request.QueryParams["RoleSessionName"] = signer.credential.RoleSessionName
request.QueryParams["DurationSeconds"] = strconv.Itoa(signer.credentialExpiration)
return

View File

@ -15,12 +15,20 @@
package sdk
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
@ -39,6 +47,8 @@ func init() {
// Version this value will be replaced while build: -ldflags="-X sdk.version=x.x.x"
var Version = "0.0.1"
var defaultConnectTimeout = 5 * time.Second
var defaultReadTimeout = 10 * time.Second
var DefaultUserAgent = fmt.Sprintf("AlibabaCloud (%s; %s) Golang/%s Core/%s", runtime.GOOS, runtime.GOARCH, strings.Trim(runtime.Version(), "go"), Version)
@ -48,12 +58,18 @@ var hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *
// Client the type Client
type Client struct {
isInsecure bool
regionId string
config *Config
httpProxy string
httpsProxy string
noProxy string
userAgent map[string]string
signer auth.Signer
httpClient *http.Client
asyncTaskQueue chan func()
readTimeout time.Duration
connectTimeout time.Duration
debug bool
isRunning bool
@ -65,6 +81,51 @@ func (client *Client) Init() (err error) {
panic("not support yet")
}
func (client *Client) SetHTTPSInsecure(isInsecure bool) {
client.isInsecure = isInsecure
}
func (client *Client) GetHTTPSInsecure() bool {
return client.isInsecure
}
func (client *Client) SetHttpsProxy(httpsProxy string) {
client.httpsProxy = httpsProxy
}
func (client *Client) GetHttpsProxy() string {
return client.httpsProxy
}
func (client *Client) SetHttpProxy(httpProxy string) {
client.httpProxy = httpProxy
}
func (client *Client) GetHttpProxy() string {
return client.httpProxy
}
func (client *Client) SetNoProxy(noProxy string) {
client.noProxy = noProxy
}
func (client *Client) GetNoProxy() string {
return client.noProxy
}
// InitWithProviderChain will get credential from the providerChain,
// the RsaKeyPairCredential Only applicable to regionID `ap-northeast-1`,
// if your providerChain may return a credential type with RsaKeyPairCredential,
// please ensure your regionID is `ap-northeast-1`.
func (client *Client) InitWithProviderChain(regionId string, provider provider.Provider) (err error) {
config := client.InitClientConfig()
credential, err := provider.Resolve()
if err != nil {
return
}
return client.InitWithOptions(regionId, config, credential)
}
func (client *Client) InitWithOptions(regionId string, config *Config, credential auth.Credential) (err error) {
client.isRunning = true
client.asyncChanLock = new(sync.RWMutex)
@ -89,6 +150,57 @@ func (client *Client) InitWithOptions(regionId string, config *Config, credentia
return
}
func (client *Client) SetReadTimeout(readTimeout time.Duration) {
client.readTimeout = readTimeout
}
func (client *Client) SetConnectTimeout(connectTimeout time.Duration) {
client.connectTimeout = connectTimeout
}
func (client *Client) GetReadTimeout() time.Duration {
return client.readTimeout
}
func (client *Client) GetConnectTimeout() time.Duration {
return client.connectTimeout
}
// getHttpProxy resolves the proxy URL for the given scheme. A value
// configured on the client wins; otherwise the conventional upper-case and
// then lower-case environment variables are consulted. When nothing is
// configured, both results are nil.
func (client *Client) getHttpProxy(scheme string) (proxy *url.URL, err error) {
	var raw string
	if scheme == "https" {
		raw = client.GetHttpsProxy()
		if raw == "" {
			raw = os.Getenv("HTTPS_PROXY")
		}
		if raw == "" {
			raw = os.Getenv("https_proxy")
		}
	} else {
		raw = client.GetHttpProxy()
		if raw == "" {
			raw = os.Getenv("HTTP_PROXY")
		}
		if raw == "" {
			raw = os.Getenv("http_proxy")
		}
	}
	if raw == "" {
		return nil, nil
	}
	return url.Parse(raw)
}
// getNoProxy returns the comma-separated no-proxy host list, preferring the
// client setting over the NO_PROXY / no_proxy environment variables. The
// scheme argument is accepted for symmetry with getHttpProxy but unused.
// A nil slice is returned when nothing is configured.
func (client *Client) getNoProxy(scheme string) []string {
	raw := client.GetNoProxy()
	if raw == "" {
		raw = os.Getenv("NO_PROXY")
	}
	if raw == "" {
		raw = os.Getenv("no_proxy")
	}
	if raw == "" {
		return nil
	}
	return strings.Split(raw, ",")
}
// EnableAsync enable the async task queue
func (client *Client) EnableAsync(routinePoolSize, maxTaskQueueSize int) {
client.asyncTaskQueue = make(chan func(), maxTaskQueueSize)
@ -136,6 +248,18 @@ func (client *Client) InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret,
return client.InitWithOptions(regionId, config, credential)
}
func (client *Client) InitWithRamRoleArnAndPolicy(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string) (err error) {
config := client.InitClientConfig()
credential := &credentials.RamRoleArnCredential{
AccessKeyId: accessKeyId,
AccessKeySecret: accessKeySecret,
RoleArn: roleArn,
RoleSessionName: roleSessionName,
Policy: policy,
}
return client.InitWithOptions(regionId, config, credential)
}
func (client *Client) InitWithRsaKeyPair(regionId, publicKeyId, privateKey string, sessionExpiration int) (err error) {
config := client.InitClientConfig()
credential := &credentials.RsaKeyPairCredential{
@ -154,6 +278,14 @@ func (client *Client) InitWithEcsRamRole(regionId, roleName string) (err error)
return client.InitWithOptions(regionId, config, credential)
}
func (client *Client) InitWithBearerToken(regionId, bearerToken string) (err error) {
config := client.InitClientConfig()
credential := &credentials.BearerTokenCredential{
BearerToken: bearerToken,
}
return client.InitWithOptions(regionId, config, credential)
}
func (client *Client) InitClientConfig() (config *Config) {
if client.config != nil {
return client.config
@ -260,13 +392,103 @@ func (client *Client) BuildRequestWithSigner(request requests.AcsRequest, signer
return
}
// getTimeout resolves the effective read and connect timeouts for a request:
// a non-zero per-request value wins, then a non-zero client-wide value, and
// finally the package defaults.
func (client *Client) getTimeout(request requests.AcsRequest) (time.Duration, time.Duration) {
	// pick applies the request > client > default precedence for one timeout.
	pick := func(req, cli, def time.Duration) time.Duration {
		if req != 0 {
			return req
		}
		if cli != 0 {
			return cli
		}
		return def
	}
	readTimeout := pick(request.GetReadTimeout(), client.readTimeout, defaultReadTimeout)
	connectTimeout := pick(request.GetConnectTimeout(), client.connectTimeout, defaultConnectTimeout)
	return readTimeout, connectTimeout
}
func Timeout(connectTimeout, readTimeout time.Duration) func(cxt context.Context, net, addr string) (c net.Conn, err error) {
return func(ctx context.Context, network, address string) (net.Conn, error) {
conn, err := (&net.Dialer{
Timeout: connectTimeout,
KeepAlive: 0 * time.Second,
DualStack: true,
}).DialContext(ctx, network, address)
if err == nil {
conn.SetDeadline(time.Now().Add(readTimeout))
}
return conn, err
}
}
// setTimeout installs the resolved dial/read timeouts on the client's HTTP
// transport, creating a fresh *http.Transport when the current transport is
// absent or of a different concrete type.
func (client *Client) setTimeout(request requests.AcsRequest) {
	readTimeout, connectTimeout := client.getTimeout(request)
	dial := Timeout(connectTimeout, readTimeout)
	trans, ok := client.httpClient.Transport.(*http.Transport)
	if !ok || trans == nil {
		trans = &http.Transport{}
	}
	trans.DialContext = dial
	client.httpClient.Transport = trans
}
// getHTTPSInsecure resolves whether TLS certificate verification should be
// skipped: a per-request override takes precedence over the client-wide flag.
func (client *Client) getHTTPSInsecure(request requests.AcsRequest) (insecure bool) {
	override := request.GetHTTPSInsecure()
	if override == nil {
		return client.GetHTTPSInsecure()
	}
	return *override
}
func (client *Client) DoActionWithSigner(request requests.AcsRequest, response responses.AcsResponse, signer auth.Signer) (err error) {
httpRequest, err := client.buildRequestWithSigner(request, signer)
if err != nil {
return
}
client.setTimeout(request)
proxy, err := client.getHttpProxy(httpRequest.URL.Scheme)
if err != nil {
return err
}
noProxy := client.getNoProxy(httpRequest.URL.Scheme)
var flag bool
for _, value := range noProxy {
if value == httpRequest.Host {
flag = true
break
}
}
// Set whether to ignore certificate validation.
// Default InsecureSkipVerify is false.
if trans, ok := client.httpClient.Transport.(*http.Transport); ok && trans != nil {
trans.TLSClientConfig = &tls.Config{
InsecureSkipVerify: client.getHTTPSInsecure(request),
}
if proxy != nil && !flag {
trans.Proxy = http.ProxyURL(proxy)
}
client.httpClient.Transport = trans
}
var httpResponse *http.Response
for retryTimes := 0; retryTimes <= client.config.MaxRetryTime; retryTimes++ {
if proxy != nil && proxy.User != nil{
if password, passwordSet := proxy.User.Password(); passwordSet {
httpRequest.SetBasicAuth(proxy.User.Username(), password)
}
}
debug("> %s %s %s", httpRequest.Method, httpRequest.URL.RequestURI(), httpRequest.Proto)
debug("> Host: %s", httpRequest.Host)
for key, value := range httpRequest.Header {
@ -287,13 +509,19 @@ func (client *Client) DoActionWithSigner(request requests.AcsRequest, response r
return
} else if retryTimes >= client.config.MaxRetryTime {
// timeout but reached the max retry times, return
timeoutErrorMsg := fmt.Sprintf(errors.TimeoutErrorMessage, strconv.Itoa(retryTimes+1), strconv.Itoa(retryTimes+1))
var timeoutErrorMsg string
if strings.Contains(err.Error(), "read tcp") {
timeoutErrorMsg = fmt.Sprintf(errors.TimeoutErrorMessage, strconv.Itoa(retryTimes+1), strconv.Itoa(retryTimes+1)) + " Read timeout. Please set a valid ReadTimeout."
} else {
timeoutErrorMsg = fmt.Sprintf(errors.TimeoutErrorMessage, strconv.Itoa(retryTimes+1), strconv.Itoa(retryTimes+1)) + " Connect timeout. Please set a valid ConnectTimeout."
}
err = errors.NewClientError(errors.TimeoutErrorCode, timeoutErrorMsg, err)
return
}
}
// if status code >= 500 or timeout, will trigger retry
if client.config.AutoRetry && (err != nil || isServerError(httpResponse)) {
client.setTimeout(request)
// rewrite signatureNonce and signature
httpRequest, err = client.buildRequestWithSigner(request, signer)
// buildHttpRequest(request, finalSigner, regionId)
@ -304,6 +532,7 @@ func (client *Client) DoActionWithSigner(request requests.AcsRequest, response r
}
break
}
err = responses.Unmarshal(response, httpResponse, request.GetAcceptFormat())
// wrap server errors
if serverErr, ok := err.(*errors.ServerError); ok {
@ -368,6 +597,18 @@ func NewClient() (client *Client, err error) {
return
}
// NewClientWithProvider builds a Client whose credential is resolved from the
// given providers. With no providers it falls back to provider.DefaultChain;
// otherwise the providers are consulted in order via a provider chain.
func NewClientWithProvider(regionId string, providers ...provider.Provider) (client *Client, err error) {
client = &Client{}
var pc provider.Provider
if len(providers) == 0 {
// No explicit providers: use the SDK's default resolution chain.
pc = provider.DefaultChain
} else {
pc = provider.NewProviderChain(providers)
}
err = client.InitWithProviderChain(regionId, pc)
return
}
func NewClientWithOptions(regionId string, config *Config, credential auth.Credential) (client *Client, err error) {
client = &Client{}
err = client.InitWithOptions(regionId, config, credential)
@ -392,6 +633,12 @@ func NewClientWithRamRoleArn(regionId string, accessKeyId, accessKeySecret, role
return
}
func NewClientWithRamRoleArnAndPolicy(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string) (client *Client, err error) {
client = &Client{}
err = client.InitWithRamRoleArnAndPolicy(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy)
return
}
func NewClientWithEcsRamRole(regionId string, roleName string) (client *Client, err error) {
client = &Client{}
err = client.InitWithEcsRamRole(regionId, roleName)
@ -404,14 +651,10 @@ func NewClientWithRsaKeyPair(regionId string, publicKeyId, privateKey string, se
return
}
// Deprecated: Use NewClientWithRamRoleArn in this package instead.
func NewClientWithStsRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) {
return NewClientWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName)
}
// Deprecated: Use NewClientWithEcsRamRole in this package instead.
func NewClientWithStsRoleNameOnEcs(regionId string, roleName string) (client *Client, err error) {
return NewClientWithEcsRamRole(regionId, roleName)
func NewClientWithBearerToken(regionId, bearerToken string) (client *Client, err error) {
client = &Client{}
err = client.InitWithBearerToken(regionId, bearerToken)
return
}
func (client *Client) ProcessCommonRequest(request *requests.CommonRequest) (response *responses.CommonResponse, err error) {
@ -440,3 +683,13 @@ func (client *Client) Shutdown() {
}
client.isRunning = false
}
// Deprecated: Use NewClientWithRamRoleArn in this package instead.
func NewClientWithStsRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) {
return NewClientWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName)
}
// Deprecated: Use NewClientWithEcsRamRole in this package instead.
func NewClientWithStsRoleNameOnEcs(regionId string, roleName string) (client *Client, err error) {
return NewClientWithEcsRamRole(regionId, roleName)
}

View File

@ -1,409 +0,0 @@
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sdk
import (
"bytes"
"io/ioutil"
"net/http"
"strconv"
"testing"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/stretchr/testify/assert"
)
// signertest is a stub signer used to exercise client code paths that accept
// a signer without performing any real signing; every method returns a zero
// value.
type signertest struct {
name string
}
// GetName returns an empty algorithm name.
func (s *signertest) GetName() string {
return ""
}
// GetType returns an empty credential type.
func (s *signertest) GetType() string {
return ""
}
// GetVersion returns an empty signature version.
func (s *signertest) GetVersion() string {
return ""
}
// GetAccessKeyId returns an empty id and no error.
func (s *signertest) GetAccessKeyId() (string, error) {
return "", nil
}
// GetExtraParam returns no extra request parameters.
func (s *signertest) GetExtraParam() map[string]string {
return nil
}
// Sign returns an empty signature regardless of input.
func (s *signertest) Sign(stringToSign, secretSuffix string) string {
return ""
}
func Test_Client(t *testing.T) {
defer func() {
err := recover()
assert.NotNil(t, err)
assert.Equal(t, "not support yet", err)
}()
NewClient()
}
func Test_NewClientWithOptions(t *testing.T) {
c := NewConfig()
c.HttpTransport = &http.Transport{
IdleConnTimeout: time.Duration(10 * time.Second),
}
c.EnableAsync = true
c.GoRoutinePoolSize = 1
c.MaxTaskQueueSize = 1
credential := credentials.NewAccessKeyCredential("acesskeyid", "accesskeysecret")
client, err := NewClientWithOptions("regionid", c, credential)
assert.Nil(t, err)
assert.NotNil(t, client)
}
func Test_NewClientWithAccessKey(t *testing.T) {
client, err := NewClientWithAccessKey("regionid", "acesskeyid", "accesskeysecret")
assert.Nil(t, err)
assert.NotNil(t, client)
}
func Test_NewClientWithStsToken(t *testing.T) {
client, err := NewClientWithStsToken("regionid", "acesskeyid", "accesskeysecret", "token")
assert.Nil(t, err)
assert.NotNil(t, client)
}
func Test_NewClientWithRamRoleArn(t *testing.T) {
client, err := NewClientWithRamRoleArn("regionid", "acesskeyid", "accesskeysecret", "roleArn", "roleSessionName")
assert.Nil(t, err)
assert.NotNil(t, client)
config := client.InitClientConfig()
assert.NotNil(t, config)
}
func Test_NewClientWithEcsRamRole(t *testing.T) {
client, err := NewClientWithEcsRamRole("regionid", "roleName")
assert.Nil(t, err)
assert.NotNil(t, client)
}
func Test_NewClientWithRsaKeyPair(t *testing.T) {
client, err := NewClientWithRsaKeyPair("regionid", "publicKey", "privateKey", 3600)
assert.Nil(t, err)
assert.NotNil(t, client)
}
func mockResponse(statusCode int, content string) (res *http.Response, err error) {
status := strconv.Itoa(statusCode)
res = &http.Response{
Proto: "HTTP/1.1",
ProtoMajor: 1,
Header: make(http.Header),
StatusCode: statusCode,
Status: status + " " + http.StatusText(statusCode),
}
res.Body = ioutil.NopCloser(bytes.NewReader([]byte(content)))
return
}
func Test_DoAction(t *testing.T) {
client, err := NewClientWithAccessKey("regionid", "acesskeyid", "accesskeysecret")
assert.Nil(t, err)
assert.NotNil(t, client)
assert.Equal(t, true, client.isRunning)
request := requests.NewCommonRequest()
request.Domain = "ecs.aliyuncs.com"
request.Version = "2014-05-26"
request.ApiName = "DescribeInstanceStatus"
request.QueryParams["PageNumber"] = "1"
request.QueryParams["PageSize"] = "30"
request.TransToAcsRequest()
response := responses.NewCommonResponse()
origTestHookDo := hookDo
defer func() { hookDo = origTestHookDo }()
hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) {
return func(req *http.Request) (*http.Response, error) {
return mockResponse(200, "")
}
}
err = client.DoAction(request, response)
assert.Nil(t, err)
assert.Equal(t, 200, response.GetHttpStatus())
assert.Equal(t, "", response.GetHttpContentString())
client.Shutdown()
assert.Equal(t, false, client.isRunning)
}
func Test_DoAction_Timeout(t *testing.T) {
client, err := NewClientWithAccessKey("regionid", "acesskeyid", "accesskeysecret")
assert.Nil(t, err)
assert.NotNil(t, client)
assert.Equal(t, true, client.isRunning)
request := requests.NewCommonRequest()
request.Domain = "ecs.aliyuncs.com"
request.Version = "2014-05-26"
request.ApiName = "DescribeInstanceStatus"
request.QueryParams["PageNumber"] = "1"
request.QueryParams["PageSize"] = "30"
request.TransToAcsRequest()
response := responses.NewCommonResponse()
origTestHookDo := hookDo
defer func() { hookDo = origTestHookDo }()
hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) {
return func(req *http.Request) (*http.Response, error) {
return mockResponse(200, "")
}
}
err = client.DoAction(request, response)
assert.Nil(t, err)
assert.Equal(t, 200, response.GetHttpStatus())
assert.Equal(t, "", response.GetHttpContentString())
client.Shutdown()
assert.Equal(t, false, client.isRunning)
}
func Test_ProcessCommonRequest(t *testing.T) {
client, err := NewClientWithAccessKey("regionid", "acesskeyid", "accesskeysecret")
assert.Nil(t, err)
assert.NotNil(t, client)
request := requests.NewCommonRequest()
request.Domain = "ecs.aliyuncs.com"
request.Version = "2014-05-26"
request.ApiName = "DescribeInstanceStatus"
request.QueryParams["PageNumber"] = "1"
request.QueryParams["PageSize"] = "30"
origTestHookDo := hookDo
defer func() { hookDo = origTestHookDo }()
hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) {
return func(req *http.Request) (*http.Response, error) {
return mockResponse(200, "")
}
}
response, err := client.ProcessCommonRequest(request)
assert.Nil(t, err)
assert.Equal(t, 200, response.GetHttpStatus())
assert.Equal(t, "", response.GetHttpContentString())
}
func Test_DoAction_With500(t *testing.T) {
client, err := NewClientWithAccessKey("regionid", "acesskeyid", "accesskeysecret")
assert.Nil(t, err)
assert.NotNil(t, client)
assert.Equal(t, true, client.isRunning)
request := requests.NewCommonRequest()
request.Domain = "ecs.aliyuncs.com"
request.Version = "2014-05-26"
request.ApiName = "DescribeInstanceStatus"
request.QueryParams["PageNumber"] = "1"
request.QueryParams["PageSize"] = "30"
request.TransToAcsRequest()
response := responses.NewCommonResponse()
origTestHookDo := hookDo
defer func() { hookDo = origTestHookDo }()
hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) {
return func(req *http.Request) (*http.Response, error) {
return mockResponse(500, "Server Internel Error")
}
}
err = client.DoAction(request, response)
assert.NotNil(t, err)
assert.Equal(t, "SDK.ServerError\nErrorCode: \nRecommend: \nRequestId: \nMessage: Server Internel Error", err.Error())
assert.Equal(t, 500, response.GetHttpStatus())
assert.Equal(t, "Server Internel Error", response.GetHttpContentString())
}
func TestClient_BuildRequestWithSigner(t *testing.T) {
client, err := NewClientWithAccessKey("regionid", "acesskeyid", "accesskeysecret")
assert.Nil(t, err)
assert.NotNil(t, client)
assert.Equal(t, true, client.isRunning)
request := requests.NewCommonRequest()
request.Domain = "ecs.aliyuncs.com"
request.Version = "2014-05-26"
request.ApiName = "DescribeInstanceStatus"
request.QueryParams["PageNumber"] = "1"
request.QueryParams["PageSize"] = "30"
request.RegionId = "regionid"
request.TransToAcsRequest()
client.config.UserAgent = "user_agent"
err = client.BuildRequestWithSigner(request, nil)
assert.Nil(t, err)
}
func TestClient_BuildRequestWithSigner1(t *testing.T) {
client, err := NewClientWithAccessKey("regionid", "acesskeyid", "accesskeysecret")
assert.Nil(t, err)
assert.NotNil(t, client)
assert.Equal(t, true, client.isRunning)
request := requests.NewCommonRequest()
request.Domain = "ecs.aliyuncs.com"
request.Version = "2014-05-26"
request.ApiName = "DescribeInstanceStatus"
request.QueryParams["PageNumber"] = "1"
request.QueryParams["PageSize"] = "30"
request.RegionId = "regionid"
request.TransToAcsRequest()
signer := &signertest{
name: "signer",
}
err = client.BuildRequestWithSigner(request, signer)
assert.Nil(t, err)
}
func TestClient_ProcessCommonRequestWithSigner(t *testing.T) {
client, err := NewClientWithAccessKey("regionid", "acesskeyid", "accesskeysecret")
assert.Nil(t, err)
assert.NotNil(t, client)
assert.Equal(t, true, client.isRunning)
request := requests.NewCommonRequest()
request.Domain = "ecs.aliyuncs.com"
request.Version = "2014-05-26"
request.ApiName = "DescribeInstanceStatus"
request.QueryParams["PageNumber"] = "1"
request.QueryParams["PageSize"] = "30"
request.RegionId = "regionid"
signer := &signertest{
name: "signer",
}
_, err = client.ProcessCommonRequestWithSigner(request, signer)
assert.NotNil(t, err)
}
func TestClient_AppendUserAgent(t *testing.T) {
client, err := NewClientWithAccessKey("regionid", "acesskeyid", "accesskeysecret")
assert.Nil(t, err)
assert.NotNil(t, client)
assert.Equal(t, true, client.isRunning)
request := requests.NewCommonRequest()
request.Domain = "ecs.aliyuncs.com"
request.Version = "2014-05-26"
request.ApiName = "DescribeInstanceStatus"
request.RegionId = "regionid"
signer := &signertest{
name: "signer",
}
request.TransToAcsRequest()
httpRequest, err := client.buildRequestWithSigner(request, signer)
assert.Nil(t, err)
assert.Equal(t, DefaultUserAgent, httpRequest.Header.Get("User-Agent"))
client.AppendUserAgent("test", "1.01")
httpRequest, err = client.buildRequestWithSigner(request, signer)
assert.Equal(t, DefaultUserAgent+" test/1.01", httpRequest.Header.Get("User-Agent"))
request.AppendUserAgent("test", "2.01")
httpRequest, err = client.buildRequestWithSigner(request, signer)
assert.Equal(t, DefaultUserAgent+" test/2.01", httpRequest.Header.Get("User-Agent"))
request.AppendUserAgent("test", "2.02")
httpRequest, err = client.buildRequestWithSigner(request, signer)
assert.Equal(t, DefaultUserAgent+" test/2.02", httpRequest.Header.Get("User-Agent"))
client.AppendUserAgent("test", "2.01")
httpRequest, err = client.buildRequestWithSigner(request, signer)
assert.Equal(t, DefaultUserAgent+" test/2.02", httpRequest.Header.Get("User-Agent"))
client.AppendUserAgent("core", "1.01")
httpRequest, err = client.buildRequestWithSigner(request, signer)
assert.Equal(t, DefaultUserAgent+" test/2.02", httpRequest.Header.Get("User-Agent"))
request.AppendUserAgent("core", "1.01")
httpRequest, err = client.buildRequestWithSigner(request, signer)
assert.Equal(t, DefaultUserAgent+" test/2.02", httpRequest.Header.Get("User-Agent"))
request1 := requests.NewCommonRequest()
request1.Domain = "ecs.aliyuncs.com"
request1.Version = "2014-05-26"
request1.ApiName = "DescribeRegions"
request1.RegionId = "regionid"
request1.AppendUserAgent("sys", "1.01")
request1.TransToAcsRequest()
httpRequest, err = client.buildRequestWithSigner(request1, signer)
assert.Nil(t, err)
assert.Equal(t, DefaultUserAgent+" test/2.01 sys/1.01", httpRequest.Header.Get("User-Agent"))
}
func TestClient_ProcessCommonRequestWithSigner_Error(t *testing.T) {
client, err := NewClientWithAccessKey("regionid", "acesskeyid", "accesskeysecret")
assert.Nil(t, err)
assert.NotNil(t, client)
assert.Equal(t, true, client.isRunning)
request := requests.NewCommonRequest()
request.Domain = "ecs.aliyuncs.com"
request.Version = "2014-05-26"
request.ApiName = "DescribeInstanceStatus"
request.QueryParams["PageNumber"] = "1"
request.QueryParams["PageSize"] = "30"
request.RegionId = "regionid"
defer func() {
err := recover()
assert.NotNil(t, err)
}()
_, err = client.ProcessCommonRequestWithSigner(request, nil)
assert.NotNil(t, err)
}
func TestClient_NewClientWithStsRoleNameOnEcs(t *testing.T) {
client, err := NewClientWithStsRoleNameOnEcs("regionid", "rolename")
assert.Nil(t, err)
assert.NotNil(t, client)
assert.Equal(t, true, client.isRunning)
config := client.GetConfig()
assert.NotNil(t, config)
err = client.AddAsyncTask(nil)
assert.NotNil(t, err)
}
func TestClient_NewClientWithStsRoleArn(t *testing.T) {
client, err := NewClientWithStsRoleArn("regionid", "acesskeyid", "accesskeysecret", "rolearn", "rolesessionname")
assert.Nil(t, err)
assert.NotNil(t, client)
assert.Equal(t, true, client.isRunning)
task := func() {}
client.asyncTaskQueue = make(chan func(), 1)
err = client.AddAsyncTask(task)
assert.Nil(t, err)
client.Shutdown()
assert.Equal(t, false, client.isRunning)
}
//func Test_EnableAsync(t *testing.T) {
// client, err := NewClientWithAccessKey("regionid", "acesskeyid", "accesskeysecret")
// assert.Nil(t, err)
// assert.NotNil(t, client)
// assert.Equal(t, true, client.isRunning)
// client.EnableAsync(2, 8)
// client.Shutdown()
// assert.Equal(t, false, client.isRunning)
//}

View File

@ -1,52 +0,0 @@
package sdk
import (
"net/http"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func Test_Config(t *testing.T) {
config := NewConfig()
assert.NotNil(t, config, "NewConfig failed")
assert.Equal(t, true, config.AutoRetry, "Default AutoRetry should be true")
assert.Equal(t, 3, config.MaxRetryTime, "Default MaxRetryTime should be 3")
assert.Equal(t, "", config.UserAgent, "Default UserAgent should be empty")
assert.Equal(t, false, config.Debug, "Default AutoRetry should be false")
assert.Equal(t, time.Duration(10000000000), config.Timeout, "Default Timeout should be 10000000000")
assert.Equal(t, (*http.Transport)(nil), config.HttpTransport, "Default HttpTransport should be nil")
assert.Equal(t, false, config.EnableAsync, "Default EnableAsync should be false")
assert.Equal(t, 1000, config.MaxTaskQueueSize, "Default MaxTaskQueueSize should be 1000")
assert.Equal(t, 5, config.GoRoutinePoolSize, "Default GoRoutinePoolSize should be 5")
assert.Equal(t, "HTTP", config.Scheme, "Default Scheme should be HTTP")
transport := &http.Transport{
MaxIdleConns: 10,
IdleConnTimeout: 30 * time.Second,
DisableCompression: true,
}
config.
WithAutoRetry(false).
WithMaxRetryTime(0).
WithUserAgent("new user agent").
WithDebug(true).
WithTimeout(time.Duration(500000)).
WithHttpTransport(transport).
WithEnableAsync(true).
WithMaxTaskQueueSize(1).
WithGoRoutinePoolSize(10).
WithScheme("HTTPS")
assert.Equal(t, 0, config.MaxRetryTime)
assert.Equal(t, false, config.AutoRetry)
assert.Equal(t, "new user agent", config.UserAgent)
assert.Equal(t, true, config.Debug)
assert.Equal(t, time.Duration(500000), config.Timeout)
assert.Equal(t, transport, config.HttpTransport)
assert.Equal(t, true, config.EnableAsync)
assert.Equal(t, 1, config.MaxTaskQueueSize)
assert.Equal(t, 10, config.GoRoutinePoolSize)
assert.Equal(t, "HTTPS", config.Scheme)
}

File diff suppressed because it is too large Load Diff

View File

@ -7,7 +7,8 @@ import (
)
const SignatureDostNotMatchErrorCode = "SignatureDoesNotMatch"
const MessagePrefix = "Specified signature is not matched with our calculation. server string to sign is:"
const IncompleteSignatureErrorCode = "IncompleteSignature"
const MessageContain = "server string to sign is:"
var debug utils.Debug
@ -20,14 +21,15 @@ type SignatureDostNotMatchWrapper struct {
func (*SignatureDostNotMatchWrapper) tryWrap(error *ServerError, wrapInfo map[string]string) (ok bool) {
clientStringToSign := wrapInfo["StringToSign"]
if error.errorCode == SignatureDostNotMatchErrorCode && clientStringToSign != "" {
if (error.errorCode == SignatureDostNotMatchErrorCode || error.errorCode == IncompleteSignatureErrorCode) && clientStringToSign != "" {
message := error.message
if strings.HasPrefix(message, MessagePrefix) {
serverStringToSign := message[len(MessagePrefix):]
if strings.Contains(message, MessageContain) {
str := strings.Split(message, MessageContain)
serverStringToSign := str[1]
if clientStringToSign == serverStringToSign {
// user secret is error
error.recommend = "Please check you AccessKeySecret"
error.recommend = "InvalidAccessKeySecret: Please check you AccessKeySecret"
} else {
debug("Client StringToSign: %s", clientStringToSign)
debug("Server StringToSign: %s", serverStringToSign)

View File

@ -20,6 +20,7 @@ import (
"reflect"
"strconv"
"strings"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
)
@ -72,6 +73,12 @@ type AcsRequest interface {
GetAcceptFormat() string
GetLocationServiceCode() string
GetLocationEndpointType() string
GetReadTimeout() time.Duration
GetConnectTimeout() time.Duration
SetReadTimeout(readTimeout time.Duration)
SetConnectTimeout(connectTimeout time.Duration)
SetHTTPSInsecure(isInsecure bool)
GetHTTPSInsecure() *bool
GetUserAgent() map[string]string
@ -97,6 +104,9 @@ type baseRequest struct {
Domain string
Port string
RegionId string
ReadTimeout time.Duration
ConnectTimeout time.Duration
isInsecure *bool
userAgent map[string]string
product string
@ -127,6 +137,30 @@ func (request *baseRequest) GetFormParams() map[string]string {
return request.FormParams
}
func (request *baseRequest) GetReadTimeout() time.Duration {
return request.ReadTimeout
}
// GetConnectTimeout returns the per-request connection timeout
// configured on this request.
func (request *baseRequest) GetConnectTimeout() time.Duration {
	return request.ConnectTimeout
}
// SetReadTimeout sets the per-request read timeout, overriding any
// client-level setting for this request.
func (request *baseRequest) SetReadTimeout(readTimeout time.Duration) {
	request.ReadTimeout = readTimeout
}
// SetConnectTimeout sets the per-request connection timeout, overriding
// any client-level setting for this request.
func (request *baseRequest) SetConnectTimeout(connectTimeout time.Duration) {
	request.ConnectTimeout = connectTimeout
}
// GetHTTPSInsecure returns the request-level HTTPS-insecure flag.
// A nil result means the caller never set a preference (tri-state:
// unset / true / false), letting the client-level default apply —
// semantics presumed from the name; verify at the transport layer.
func (request *baseRequest) GetHTTPSInsecure() *bool {
	return request.isInsecure
}
// SetHTTPSInsecure records the HTTPS-insecure preference for this
// request. The value's address is stored so that a nil pointer can
// still distinguish "never set" from an explicit false.
func (request *baseRequest) SetHTTPSInsecure(isInsecure bool) {
	request.isInsecure = &isInsecure
}
// GetContent returns the raw request body bytes (nil when no body has
// been set).
func (request *baseRequest) GetContent() []byte {
	return request.Content
}
@ -294,7 +328,7 @@ func flatRepeatedList(dataValue reflect.Value, request AcsRequest, position, pre
for m := 0; m < repeatedFieldValue.Len(); m++ {
elementValue := repeatedFieldValue.Index(m)
key := prefix + name + "." + strconv.Itoa(m+1)
if elementValue.Type().String() == "string" {
if elementValue.Type().Kind().String() == "string" {
value := elementValue.String()
err = addParam(request, fieldPosition, key, value)
if err != nil {

View File

@ -1,148 +0,0 @@
package requests
import (
"bytes"
"io"
"testing"
"github.com/stretchr/testify/assert"
)
func Test_AcsRequest(t *testing.T) {
r := defaultBaseRequest()
assert.NotNil(t, r)
// query params
query := r.GetQueryParams()
assert.Equal(t, 0, len(query))
r.addQueryParam("key", "value")
assert.Equal(t, 1, len(query))
assert.Equal(t, "value", query["key"])
// form params
form := r.GetFormParams()
assert.Equal(t, 0, len(form))
r.addFormParam("key", "value")
assert.Equal(t, 1, len(form))
assert.Equal(t, "value", form["key"])
// getter/setter for stringtosign
assert.Equal(t, "", r.GetStringToSign())
r.SetStringToSign("s2s")
assert.Equal(t, "s2s", r.GetStringToSign())
// content type
_, contains := r.GetContentType()
assert.False(t, contains)
r.SetContentType("application/json")
ct, contains := r.GetContentType()
assert.Equal(t, "application/json", ct)
assert.True(t, contains)
// default 3 headers & content-type
headers := r.GetHeaders()
assert.Equal(t, 4, len(headers))
r.addHeaderParam("x-key", "x-key-value")
assert.Equal(t, 5, len(headers))
assert.Equal(t, "x-key-value", headers["x-key"])
// GetVersion
assert.Equal(t, "", r.GetVersion())
// GetActionName
assert.Equal(t, "", r.GetActionName())
// GetMethod
assert.Equal(t, "GET", r.GetMethod())
r.Method = "POST"
assert.Equal(t, "POST", r.GetMethod())
// Domain
assert.Equal(t, "", r.GetDomain())
r.SetDomain("ecs.aliyuncs.com")
assert.Equal(t, "ecs.aliyuncs.com", r.GetDomain())
// Region
assert.Equal(t, "", r.GetRegionId())
r.RegionId = "cn-hangzhou"
assert.Equal(t, "cn-hangzhou", r.GetRegionId())
// AcceptFormat
assert.Equal(t, "JSON", r.GetAcceptFormat())
r.AcceptFormat = "XML"
assert.Equal(t, "XML", r.GetAcceptFormat())
// GetLocationServiceCode
assert.Equal(t, "", r.GetLocationServiceCode())
// GetLocationEndpointType
assert.Equal(t, "", r.GetLocationEndpointType())
// GetProduct
assert.Equal(t, "", r.GetProduct())
// GetScheme
assert.Equal(t, "", r.GetScheme())
r.SetScheme("HTTPS")
assert.Equal(t, "HTTPS", r.GetScheme())
// GetPort
assert.Equal(t, "", r.GetPort())
// GetUserAgent
r.AppendUserAgent("cli", "1.01")
assert.Equal(t, "1.01", r.GetUserAgent()["cli"])
// Content
assert.Equal(t, []byte(nil), r.GetContent())
r.SetContent([]byte("The Content"))
assert.True(t, bytes.Equal([]byte("The Content"), r.GetContent()))
}
type AcsRequestTest struct {
*baseRequest
Ontology AcsRequest
Query string `position:"Query" name:"Query"`
Header string `position:"Header" name:"Header"`
Path string `position:"Path" name:"Path"`
Body string `position:"Body" name:"Body"`
TypeAcs *[]string `position:"type" name:"type" type:"Repeated"`
}
func (r AcsRequestTest) BuildQueries() string {
return ""
}
func (r AcsRequestTest) BuildUrl() string {
return ""
}
func (r AcsRequestTest) GetBodyReader() io.Reader {
return nil
}
func (r AcsRequestTest) GetStyle() string {
return ""
}
func (r AcsRequestTest) addPathParam(key, value string) {
return
}
func Test_AcsRequest_InitParams(t *testing.T) {
r := &AcsRequestTest{
baseRequest: defaultBaseRequest(),
Query: "query value",
Header: "header value",
Path: "path value",
Body: "body value",
}
tmp := []string{r.Query, r.Header}
r.TypeAcs = &tmp
r.addQueryParam("qkey", "qvalue")
InitParams(r)
queries := r.GetQueryParams()
assert.Equal(t, "query value", queries["Query"])
headers := r.GetHeaders()
assert.Equal(t, "header value", headers["Header"])
// TODO: check the body & path
}

View File

@ -1,82 +0,0 @@
package requests
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
)
func Test_NewCommonRequest(t *testing.T) {
r := NewCommonRequest()
assert.NotNil(t, r)
assert.Equal(t, "common", r.GetHeaders()["x-sdk-invoke-type"])
assert.Equal(t, 0, len(r.PathParams))
r.addPathParam("name", "value")
assert.Equal(t, "value", r.PathParams["name"])
}
func Test_CommonRequest_TransToAcsRequest(t *testing.T) {
r := NewCommonRequest()
assert.NotNil(t, r)
r.TransToAcsRequest()
assert.Equal(t, "RPC", r.GetStyle())
r2 := NewCommonRequest()
assert.NotNil(t, r2)
r2.PathPattern = "/users/[user]"
r2.TransToAcsRequest()
assert.Equal(t, "ROA", r2.GetStyle())
}
func Test_CommonRequest_String(t *testing.T) {
r := NewCommonRequest()
assert.NotNil(t, r)
r.SetDomain("domain")
expected := `GET /? /1.1
Host: domain
Accept-Encoding: identity
x-sdk-client: golang/1.0.0
x-sdk-invoke-type: common
`
assert.Equal(t, expected, r.String())
r.SetContent([]byte("content"))
expected = `GET /? /1.1
Host: domain
Accept-Encoding: identity
x-sdk-client: golang/1.0.0
x-sdk-invoke-type: common
content
`
assert.Equal(t, expected, r.String())
}
func Test_CommonRequest_BuildUrl(t *testing.T) {
r := NewCommonRequest()
assert.NotNil(t, r)
r.SetDomain("host")
r.SetScheme("http")
r.TransToAcsRequest()
assert.Equal(t, "http://host/?", r.BuildUrl())
r.Port = "8080"
assert.Equal(t, "http://host:8080/?", r.BuildUrl())
}
func Test_CommonRequest_GetBodyReader(t *testing.T) {
r := NewCommonRequest()
r.TransToAcsRequest()
reader := r.GetBodyReader()
b, _ := ioutil.ReadAll(reader)
assert.Equal(t, "", string(b))
}

View File

@ -134,7 +134,7 @@ func (request *RoaRequest) InitWithApiInfo(product, version, action, uriPattern,
request.pathPattern = uriPattern
request.locationServiceCode = serviceCode
request.locationEndpointType = endpointType
//request.product = product
request.product = product
//request.version = version
//request.actionName = action
}
@ -142,7 +142,7 @@ func (request *RoaRequest) InitWithApiInfo(product, version, action, uriPattern,
func (request *RoaRequest) initWithCommonRequest(commonRequest *CommonRequest) {
request.baseRequest = commonRequest.baseRequest
request.PathParams = commonRequest.PathParams
//request.product = commonRequest.Product
request.product = commonRequest.Product
//request.version = commonRequest.Version
request.Headers["x-acs-version"] = commonRequest.Version
//request.actionName = commonRequest.ApiName

View File

@ -1,116 +0,0 @@
package requests
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
)
func Test_RoaRequest(t *testing.T) {
r := &RoaRequest{}
r.InitWithApiInfo("product", "version", "action", "/", "serviceCode", "endpointType")
assert.NotNil(t, r)
assert.Equal(t, "GET", r.GetMethod())
assert.Equal(t, "ROA", r.GetStyle())
// assert.Equal(t, "version", r.GetVersion())
// assert.Equal(t, "action", r.GetActionName())
assert.Equal(t, "serviceCode", r.GetLocationServiceCode())
assert.Equal(t, "endpointType", r.GetLocationEndpointType())
}
func Test_RoaRequest_initWithCommonRequest(t *testing.T) {
r := &RoaRequest{}
common := NewCommonRequest()
r.initWithCommonRequest(common)
assert.NotNil(t, r)
assert.Equal(t, "GET", r.GetMethod())
assert.Equal(t, "ROA", r.GetStyle())
assert.Equal(t, "common", r.Headers["x-sdk-invoke-type"])
// assert.Equal(t, "version", r.GetVersion())
// assert.Equal(t, "action", r.GetActionName())
}
func Test_RoaRequest_BuildQueries(t *testing.T) {
// url
r := &RoaRequest{}
r.InitWithApiInfo("product", "version", "action", "/", "serviceCode", "endpointType")
assert.Equal(t, "/", r.BuildQueries())
r.addQueryParam("key", "value")
assert.Equal(t, "/?key=value", r.BuildQueries())
r.addQueryParam("key2", "value2")
assert.Equal(t, "/?key=value&key2=value2", r.BuildQueries())
// assert.Equal(t, "/?key=https%3A%2F%2Fdomain%2F%3Fq%3Dv", r.BuildQueries())
}
func Test_RoaRequest_BuildUrl(t *testing.T) {
r := &RoaRequest{}
r.InitWithApiInfo("product", "version", "action", "/", "serviceCode", "endpointType")
r.Domain = "domain.com"
r.Scheme = "http"
r.Port = "80"
assert.Equal(t, "http://domain.com:80/", r.BuildUrl())
r.addQueryParam("key", "value")
assert.Equal(t, "http://domain.com:80/?key=value", r.BuildUrl())
r.addQueryParam("key", "https://domain/?q=v")
assert.Equal(t, "http://domain.com:80/?key=https%3A%2F%2Fdomain%2F%3Fq%3Dv", r.BuildUrl())
r.addQueryParam("url", "https://domain/?q1=v1&q2=v2")
assert.Equal(t, "http://domain.com:80/?key=https%3A%2F%2Fdomain%2F%3Fq%3Dv&url=https%3A%2F%2Fdomain%2F%3Fq1%3Dv1%26q2%3Dv2", r.BuildUrl())
}
func Test_RoaRequest_BuildUrl2(t *testing.T) {
r := &RoaRequest{}
r.InitWithApiInfo("product", "version", "action", "/", "serviceCode", "endpointType")
r.Domain = "domain.com"
r.Scheme = "http"
r.Port = "80"
assert.Equal(t, "http://domain.com:80/", r.BuildUrl())
r.addPathParam("key", "value")
assert.Equal(t, "http://domain.com:80/", r.BuildUrl())
r = &RoaRequest{}
r.InitWithApiInfo("product", "version", "action", "/users/[user]", "serviceCode", "endpointType")
r.Domain = "domain.com"
r.Scheme = "http"
r.Port = "80"
r.addPathParam("user", "name")
assert.Equal(t, "http://domain.com:80/users/name", r.BuildUrl())
r.addQueryParam("key", "value")
assert.Equal(t, "http://domain.com:80/users/name?key=value", r.BuildUrl())
}
func Test_RoaRequest_GetBodyReader_Nil(t *testing.T) {
r := &RoaRequest{}
r.InitWithApiInfo("product", "version", "action", "/", "serviceCode", "endpointType")
reader := r.GetBodyReader()
assert.Nil(t, reader)
}
func Test_RoaRequest_GetBodyReader_Form(t *testing.T) {
r := &RoaRequest{}
r.InitWithApiInfo("product", "version", "action", "/", "serviceCode", "endpointType")
r.addFormParam("key", "value")
reader := r.GetBodyReader()
b, _ := ioutil.ReadAll(reader)
assert.Equal(t, "key=value", string(b))
}
func Test_RoaRequest_GetBodyReader_Content(t *testing.T) {
r := &RoaRequest{}
r.InitWithApiInfo("product", "version", "action", "/", "serviceCode", "endpointType")
r.SetContent([]byte("Hello world"))
reader := r.GetBodyReader()
b, _ := ioutil.ReadAll(reader)
assert.Equal(t, "Hello world", string(b))
}
// func Test_RoaRequest_addPathParam(t *testing.T) {
// r := &RoaRequest{}
// r.InitWithApiInfo("product", "version", "action", "/", "serviceCode", "endpointType")
// r.addPathParam("key", "value")
// }

View File

@ -1,70 +0,0 @@
package requests
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
)
func Test_RpcRequest(t *testing.T) {
r := &RpcRequest{}
r.InitWithApiInfo("product", "version", "action", "serviceCode", "endpointType")
assert.NotNil(t, r)
assert.Equal(t, "POST", r.GetMethod())
assert.Equal(t, "RPC", r.GetStyle())
assert.Equal(t, "product", r.GetProduct())
assert.Equal(t, "version", r.GetVersion())
assert.Equal(t, "action", r.GetActionName())
assert.Equal(t, "serviceCode", r.GetLocationServiceCode())
assert.Equal(t, "endpointType", r.GetLocationEndpointType())
}
func Test_RpcRequest_BuildQueries(t *testing.T) {
// url
r := &RpcRequest{}
r.InitWithApiInfo("product", "version", "action", "serviceCode", "endpointType")
assert.Equal(t, "/?", r.BuildQueries())
r.addQueryParam("key", "value")
assert.Equal(t, "/?key=value", r.BuildQueries())
r.addQueryParam("key", "https://domain/?q=v")
assert.Equal(t, "/?key=https%3A%2F%2Fdomain%2F%3Fq%3Dv", r.BuildQueries())
}
func Test_RpcRequest_BuildUrl(t *testing.T) {
r := &RpcRequest{}
r.InitWithApiInfo("product", "version", "action", "serviceCode", "endpointType")
r.Domain = "domain.com"
r.Scheme = "http"
r.Port = "80"
assert.Equal(t, "http://domain.com:80/?", r.BuildUrl())
r.addQueryParam("key", "value")
assert.Equal(t, "http://domain.com:80/?key=value", r.BuildUrl())
r.addQueryParam("key", "https://domain/?q=v")
assert.Equal(t, "http://domain.com:80/?key=https%3A%2F%2Fdomain%2F%3Fq%3Dv", r.BuildUrl())
}
func Test_RpcRequest_GetBodyReader(t *testing.T) {
r := &RpcRequest{}
r.InitWithApiInfo("product", "version", "action", "serviceCode", "endpointType")
reader := r.GetBodyReader()
b, _ := ioutil.ReadAll(reader)
assert.Equal(t, "", string(b))
r.addFormParam("key", "value")
reader = r.GetBodyReader()
b, _ = ioutil.ReadAll(reader)
assert.Equal(t, "key=value", string(b))
}
func Test_RpcRequest_addPathParam(t *testing.T) {
defer func() { //进行异常捕捉
err := recover()
assert.NotNil(t, err)
assert.Equal(t, "not support", err)
}()
r := &RpcRequest{}
r.InitWithApiInfo("product", "version", "action", "serviceCode", "endpointType")
r.addPathParam("key", "value")
}

View File

@ -1,51 +0,0 @@
package requests
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewInteger(t *testing.T) {
integer := NewInteger(123123)
assert.True(t, integer.HasValue())
value, err := integer.GetValue()
assert.Nil(t, err)
assert.Equal(t, 123123, value)
var expected Integer
expected = "123123"
assert.Equal(t, expected, integer)
}
func TestNewInteger64(t *testing.T) {
long := NewInteger64(123123123123123123)
assert.True(t, long.HasValue())
value, err := long.GetValue64()
assert.Nil(t, err)
assert.Equal(t, int64(123123123123123123), value)
var expected Integer
expected = "123123123123123123"
assert.Equal(t, expected, long)
}
func TestNewBoolean(t *testing.T) {
boolean := NewBoolean(false)
assert.True(t, boolean.HasValue())
value, err := boolean.GetValue()
assert.Nil(t, err)
assert.Equal(t, false, value)
var expected Boolean
expected = "false"
assert.Equal(t, expected, boolean)
}
func TestNewFloat(t *testing.T) {
float := NewFloat(123123.123123)
assert.True(t, float.HasValue())
value, err := float.GetValue()
assert.Nil(t, err)
assert.Equal(t, 123123.123123, value)
var expected Float
expected = "123123.123123"
assert.Equal(t, expected, float)
}

View File

@ -1,6 +0,0 @@
package resource
func GetTZData(name string) ([]byte, bool) {
data, ok := files["zoneinfo/"+name]
return data, ok
}

File diff suppressed because one or more lines are too long

View File

@ -9,7 +9,7 @@ import (
"sync"
"unsafe"
"github.com/json-iterator/go"
jsoniter "github.com/json-iterator/go"
)
const maxUint = ^uint(0)
@ -22,7 +22,12 @@ var initJson = &sync.Once{}
func initJsonParserOnce() {
initJson.Do(func() {
registerBetterFuzzyDecoder()
jsonParser = jsoniter.ConfigCompatibleWithStandardLibrary
jsonParser = jsoniter.Config{
EscapeHTML: true,
SortMapKeys: true,
ValidateJsonRawMessage: true,
CaseSensitive: true,
}.Froze()
})
}

View File

@ -23,6 +23,7 @@ import (
"strings"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
)
type AcsResponse interface {
@ -35,6 +36,11 @@ type AcsResponse interface {
parseFromHttpResponse(httpResponse *http.Response) error
}
var debug utils.Debug
// init wires the package-level debug function to the "sdk" debug
// namespace via utils.Init — presumably gated by an environment
// variable; TODO confirm the activation mechanism in the utils package.
func init() {
	debug = utils.Init("sdk")
}
// Unmarshal object from http response body to target Response
func Unmarshal(response AcsResponse, httpResponse *http.Response, format string) (err error) {
err = response.parseFromHttpResponse(httpResponse)
@ -109,6 +115,7 @@ func (baseResponse *BaseResponse) parseFromHttpResponse(httpResponse *http.Respo
if err != nil {
return
}
debug("%s", string(body))
baseResponse.httpStatus = httpResponse.StatusCode
baseResponse.httpHeaders = httpResponse.Header
baseResponse.httpContentBytes = body

View File

@ -26,10 +26,6 @@ import (
"github.com/satori/go.uuid"
)
// if you use go 1.10 or higher, you can hack this util by these to avoid "TimeZone.zip not found" on Windows
var LoadLocationFromTZData func(name string, data []byte) (*time.Location, error) = nil
var TZData []byte = nil
func GetUUIDV4() (uuidHex string) {
uuidV4 := uuid.NewV4()
uuidHex = hex.EncodeToString(uuidV4.Bytes())
@ -44,29 +40,15 @@ func GetMD5Base64(bytes []byte) (base64Value string) {
return
}
func GetGMTLocation() (*time.Location, error) {
if LoadLocationFromTZData != nil && TZData != nil {
return LoadLocationFromTZData("GMT", TZData)
} else {
return time.LoadLocation("GMT")
}
}
func GetTimeInFormatISO8601() (timeStr string) {
gmt, err := GetGMTLocation()
gmt := time.FixedZone("GMT", 0)
if err != nil {
panic(err)
}
return time.Now().In(gmt).Format("2006-01-02T15:04:05Z")
}
func GetTimeInFormatRFC2616() (timeStr string) {
gmt, err := GetGMTLocation()
gmt := time.FixedZone("GMT", 0)
if err != nil {
panic(err)
}
return time.Now().In(gmt).Format("Mon, 02 Jan 2006 15:04:05 GMT")
}

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 1999-2017 Alibaba Group Holding Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,52 @@
package tablestore
import (
"errors"
"fmt"
)
// Client-side validation errors. All messages carry the "[tablestore]"
// prefix so they are easy to attribute in logs. The first two entries
// are constructor funcs because they embed caller-supplied detail.
var (
	// errMissMustHeader reports a missing mandatory HTTP header.
	errMissMustHeader = func(header string) error {
		return errors.New("[tablestore] miss must header: " + header)
	}
	// errTableNameTooLong reports a table name exceeding the service limit.
	errTableNameTooLong = func(name string) error {
		return errors.New("[tablestore] table name: \"" + name + "\" too long")
	}

	errInvalidPartitionType = errors.New("[tablestore] invalid partition key")
	errMissPrimaryKey = errors.New("[tablestore] missing primary key")
	errPrimaryKeyTooMuch = errors.New("[tablestore] primary key too much")
	errMultiDeleteRowsTooMuch = errors.New("[tablestore] multi delete rows too much")
	errCreateTableNoPrimaryKey = errors.New("[tablestore] create table no primary key")
	// Wire/PlainBuffer decoding errors.
	errUnexpectIoEnd = errors.New("[tablestore] unexpect io end")
	errTag = errors.New("[tablestore] unexpect tag")
	errNoChecksum = errors.New("[tablestore] expect checksum")
	errChecksum = errors.New("[tablestore] checksum failed")
	errInvalidInput = errors.New("[tablestore] invalid input")
)
// Error-code strings returned by the OTS (Table Store) service.
// NOTE(review): codes like these typically drive the retry decision —
// confirm against the retry policy that consumes them.
const (
	OTS_CLIENT_UNKNOWN = "OTSClientUnknownError"
	ROW_OPERATION_CONFLICT = "OTSRowOperationConflict"
	NOT_ENOUGH_CAPACITY_UNIT = "OTSNotEnoughCapacityUnit"
	TABLE_NOT_READY = "OTSTableNotReady"
	PARTITION_UNAVAILABLE = "OTSPartitionUnavailable"
	SERVER_BUSY = "OTSServerBusy"
	STORAGE_SERVER_BUSY = "OTSStorageServerBusy"
	QUOTA_EXHAUSTED = "OTSQuotaExhausted"
	STORAGE_TIMEOUT = "OTSTimeout"
	SERVER_UNAVAILABLE = "OTSServerUnavailable"
	INTERNAL_SERVER_ERROR = "OTSInternalServerError"
)
// OtsError is the structured error returned for a failed Table Store
// request: the service error code, a human-readable message, and the
// request id for tracing/support.
type OtsError struct {
	Code string
	Message string
	RequestId string
}

// Error implements the error interface, rendering the error as
// "<code> <message> <request id>".
func (e *OtsError) Error() string {
	return fmt.Sprint(e.Code, " ", e.Message, " ", e.RequestId)
}

View File

@ -0,0 +1,22 @@
package tablestore
// TableStoreApi is the full client surface of the Table Store SDK:
// table lifecycle management, single-row and batch row operations,
// range reads, and stream (change-capture) access.
type TableStoreApi interface {
	// Table lifecycle.
	CreateTable(request *CreateTableRequest) (*CreateTableResponse, error)
	ListTable() (*ListTableResponse, error)
	DeleteTable(request *DeleteTableRequest) (*DeleteTableResponse, error)
	DescribeTable(request *DescribeTableRequest) (*DescribeTableResponse, error)
	UpdateTable(request *UpdateTableRequest) (*UpdateTableResponse, error)

	// Single-row operations.
	PutRow(request *PutRowRequest) (*PutRowResponse, error)
	DeleteRow(request *DeleteRowRequest) (*DeleteRowResponse, error)
	GetRow(request *GetRowRequest) (*GetRowResponse, error)
	UpdateRow(request *UpdateRowRequest) (*UpdateRowResponse, error)

	// Batch and range operations.
	BatchGetRow(request *BatchGetRowRequest) (*BatchGetRowResponse, error)
	BatchWriteRow(request *BatchWriteRowRequest) (*BatchWriteRowResponse, error)
	GetRange(request *GetRangeRequest) (*GetRangeResponse, error)

	// Stream (incremental change) related.
	ListStream(request *ListStreamRequest) (*ListStreamResponse, error)
	DescribeStream(request *DescribeStreamRequest) (*DescribeStreamResponse, error)
	GetShardIterator(request *GetShardIteratorRequest) (*GetShardIteratorResponse, error)
	GetStreamRecord(request *GetStreamRecordRequest) (*GetStreamRecordResponse, error)
}

View File

@ -0,0 +1,855 @@
package tablestore
import (
"fmt"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
"math/rand"
"net/http"
"strconv"
"strings"
"time"
//"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
)
// TableStoreClient connects to the OTS (Table Store) service with the
// supplied credentials and performs table management (create/list/
// delete) and row operations (get/put/update/delete).
//
// TableStoreClient is thread-safe.
type TableStoreClient struct {
	endPoint string // service endpoint URL
	instanceName string // OTS instance name
	accessKeyId string
	accessKeySecret string
	securityToken string // optional STS token; empty when unused
	httpClient IHttpClient // pluggable transport (mockable in tests)
	config *TableStoreConfig
	random *rand.Rand // presumably used for retry jitter — TODO confirm
}
// ClientOption mutates a TableStoreClient during construction
// (functional-option pattern).
type ClientOption func(*TableStoreClient)
// TableStoreHttpClient is the default IHttpClient, backed by a
// standard *http.Client.
type TableStoreHttpClient struct {
	httpClient *http.Client
}

// IHttpClient abstracts the HTTP transport so it can be mocked for
// testing.
type IHttpClient interface {
	Do(*http.Request) (*http.Response, error)
	New(*http.Client)
}
// Do executes req with the wrapped *http.Client.
func (httpClient *TableStoreHttpClient) Do(req *http.Request) (*http.Response, error) {
	return httpClient.httpClient.Do(req)
}
// New installs client as the underlying *http.Client.
func (httpClient *TableStoreHttpClient) New(client *http.Client) {
	httpClient.httpClient = client
}
// HTTPTimeout bundles the two timeouts applied to the underlying HTTP
// transport: connection establishment and the whole request.
type HTTPTimeout struct {
	ConnectionTimeout time.Duration
	RequestTimeout time.Duration
}

// TableStoreConfig carries client tuning knobs: retry policy, HTTP
// timeouts and the connection-pool size.
type TableStoreConfig struct {
	RetryTimes uint
	MaxRetryTime time.Duration
	HTTPTimeout HTTPTimeout
	MaxIdleConnections int
}

// NewDefaultTableStoreConfig returns the default client configuration:
// up to 10 retries capped at a 5s interval, 15s connect / 30s request
// timeouts, and at most 2000 idle connections.
func NewDefaultTableStoreConfig() *TableStoreConfig {
	return &TableStoreConfig{
		RetryTimes: 10,
		MaxRetryTime: time.Second * 5,
		HTTPTimeout: HTTPTimeout{
			ConnectionTimeout: time.Second * 15,
			RequestTimeout: time.Second * 30,
		},
		MaxIdleConnections: 2000,
	}
}
// CreateTableRequest bundles everything needed to create a table:
// schema, data policies, reserved throughput, optional stream settings
// and secondary index definitions.
type CreateTableRequest struct {
	TableMeta *TableMeta
	TableOption *TableOption
	ReservedThroughput *ReservedThroughput
	StreamSpec *StreamSpecification // nil when no stream is requested
	IndexMetas []*IndexMeta
}
// CreateIndexRequest creates a secondary index on an existing table.
type CreateIndexRequest struct {
	MainTableName string
	IndexMeta *IndexMeta
	IncludeBaseData bool // whether existing rows are back-filled into the index
}

// DeleteIndexRequest removes a secondary index from a table.
type DeleteIndexRequest struct {
	MainTableName string
	IndexName string
}

// ResponseInfo carries the server-assigned request id; embedded in
// every response type below.
type ResponseInfo struct {
	RequestId string
}

// CreateTableResponse is the (id-only) reply to CreateTable.
type CreateTableResponse struct {
	ResponseInfo
}

// CreateIndexResponse is the (id-only) reply to CreateIndex.
type CreateIndexResponse struct {
	ResponseInfo
}

// DeleteIndexResponse is the (id-only) reply to DeleteIndex.
type DeleteIndexResponse struct {
	ResponseInfo
}

// DeleteTableResponse is the (id-only) reply to DeleteTable.
type DeleteTableResponse struct {
	ResponseInfo
}
type TableMeta struct {
TableName string
SchemaEntry []*PrimaryKeySchema
DefinedColumns []*DefinedColumnSchema
}
type PrimaryKeySchema struct {
Name *string
Type *PrimaryKeyType
Option *PrimaryKeyOption
}
type PrimaryKey struct {
PrimaryKeys []*PrimaryKeyColumn
}
type TableOption struct {
TimeToAlive, MaxVersion int
}
type ReservedThroughput struct {
Readcap, Writecap int
}
type ListTableResponse struct {
TableNames []string
ResponseInfo
}
type DeleteTableRequest struct {
TableName string
}
type DescribeTableRequest struct {
TableName string
}
type DescribeTableResponse struct {
TableMeta *TableMeta
TableOption *TableOption
ReservedThroughput *ReservedThroughput
StreamDetails *StreamDetails
IndexMetas []*IndexMeta
ResponseInfo
}
type UpdateTableRequest struct {
TableName string
TableOption *TableOption
ReservedThroughput *ReservedThroughput
StreamSpec *StreamSpecification
}
type UpdateTableResponse struct {
TableOption *TableOption
ReservedThroughput *ReservedThroughput
StreamDetails *StreamDetails
ResponseInfo
}
type ConsumedCapacityUnit struct {
Read int32
Write int32
}
type PutRowResponse struct {
ConsumedCapacityUnit *ConsumedCapacityUnit
PrimaryKey PrimaryKey
ResponseInfo
}
type DeleteRowResponse struct {
ConsumedCapacityUnit *ConsumedCapacityUnit
ResponseInfo
}
type UpdateRowResponse struct {
Columns []*AttributeColumn
ConsumedCapacityUnit *ConsumedCapacityUnit
ResponseInfo
}
// PrimaryKeyType enumerates the supported primary-key column value types.
type PrimaryKeyType int32

const (
	PrimaryKeyType_INTEGER PrimaryKeyType = 1
	PrimaryKeyType_STRING  PrimaryKeyType = 2
	PrimaryKeyType_BINARY  PrimaryKeyType = 3
)

// Retry back-off bounds.
// NOTE(review): units are not visible in this chunk — confirm where these
// constants are consumed.
const (
	DefaultRetryInterval = 10
	MaxRetryInterval     = 320
)

// PrimaryKeyOption selects special primary-key value semantics
// (auto-increment, or the -inf/+inf range sentinels).
type PrimaryKeyOption int32

const (
	NONE           PrimaryKeyOption = 0
	AUTO_INCREMENT PrimaryKeyOption = 1
	MIN            PrimaryKeyOption = 2
	MAX            PrimaryKeyOption = 3
)

// PrimaryKeyColumn is one column of a row's primary key.
type PrimaryKeyColumn struct {
	ColumnName       string
	Value            interface{}
	PrimaryKeyOption PrimaryKeyOption
}

// String renders the column as a small JSON-like object. The value is
// replaced by -inf/+inf/auto-incr for the special key options; for any
// other (unknown) option only the name is printed.
func (col *PrimaryKeyColumn) String() string {
	parts := []string{fmt.Sprintf("\"Name\": \"%s\"", col.ColumnName)}
	switch col.PrimaryKeyOption {
	case NONE:
		parts = append(parts, fmt.Sprintf("\"Value\": \"%s\"", col.Value))
	case MIN:
		parts = append(parts, "\"Value\": -inf")
	case MAX:
		parts = append(parts, "\"Value\": +inf")
	case AUTO_INCREMENT:
		parts = append(parts, "\"Value\": auto-incr")
	}
	return "{" + strings.Join(parts, ", ") + "}"
}
// AttributeColumn is one non-primary-key column value of a row, together
// with its version timestamp.
type AttributeColumn struct {
	ColumnName string
	Value      interface{}
	Timestamp  int64
}

// TimeRange selects row versions either by a [Start, End) range or by one
// Specific timestamp.
// NOTE(review): which of the two forms wins when both are set is not
// visible here — confirm in the request serializer.
type TimeRange struct {
	Start    int64
	End      int64
	Specific int64
}

// ColumnToUpdate describes one column mutation in an UpdateRow request.
// The Has*/IgnoreValue flags record which optional parts were supplied.
type ColumnToUpdate struct {
	ColumnName   string
	Type         byte
	Timestamp    int64
	HasType      bool
	HasTimestamp bool
	IgnoreValue  bool
	Value        interface{}
}
// RowExistenceExpectation is the row-existence precondition attached to a
// write (ignore, must exist, must not exist).
type RowExistenceExpectation int

const (
	RowExistenceExpectation_IGNORE           RowExistenceExpectation = 0
	RowExistenceExpectation_EXPECT_EXIST     RowExistenceExpectation = 1
	RowExistenceExpectation_EXPECT_NOT_EXIST RowExistenceExpectation = 2
)

// ComparatorType enumerates the comparison operators usable in
// single-column value filters.
type ComparatorType int32

const (
	CT_EQUAL         ComparatorType = 1
	CT_NOT_EQUAL     ComparatorType = 2
	CT_GREATER_THAN  ComparatorType = 3
	CT_GREATER_EQUAL ComparatorType = 4
	CT_LESS_THAN     ComparatorType = 5
	CT_LESS_EQUAL    ComparatorType = 6
)

// LogicalOperator combines sub-filters in a composite filter.
type LogicalOperator int32

const (
	LO_NOT LogicalOperator = 1
	LO_AND LogicalOperator = 2
	LO_OR  LogicalOperator = 3
)

// FilterType tags the concrete filter kind carried in a protocol Filter
// envelope; values mirror otsprotocol.FilterType.
type FilterType int32

const (
	FT_SINGLE_COLUMN_VALUE    FilterType = 1
	FT_COMPOSITE_COLUMN_VALUE FilterType = 2
	FT_COLUMN_PAGINATION      FilterType = 3
)

// ColumnFilter is implemented by every filter kind; Serialize returns the
// protobuf wire bytes and ToFilter wraps the filter in the protocol
// envelope.
type ColumnFilter interface {
	Serialize() []byte
	ToFilter() *otsprotocol.Filter
}
// VariantType enumerates variant value types; values mirror the protocol's
// VariantType enum (VT_BOOLEAN = 2 is reserved and intentionally unused).
type VariantType int32

const (
	Variant_INTEGER VariantType = 0
	Variant_DOUBLE  VariantType = 1
	// VT_BOOLEAN = 2 is reserved in the protocol.
	Variant_STRING VariantType = 3
)
// ValueTransferRule extracts a value from a column via a regex and casts
// the match to Cast_type before comparison.
type ValueTransferRule struct {
	Regex     string
	Cast_type VariantType
}

// SingleColumnCondition filters rows by comparing one column's value.
type SingleColumnCondition struct {
	Comparator        *ComparatorType
	ColumnName        *string
	ColumnValue       interface{} //[]byte
	FilterIfMissing   bool // whether rows lacking the column are filtered out
	LatestVersionOnly bool // compare only the latest version of the column
	TransferRule      *ValueTransferRule
}

// ReturnType selects what a write request returns (nothing, the primary
// key, or the modified columns).
type ReturnType int32

const (
	ReturnType_RT_NONE         ReturnType = 0
	ReturnType_RT_PK           ReturnType = 1
	ReturnType_RT_AFTER_MODIFY ReturnType = 2
)

// PaginationFilter reads a column slice: skip Offset columns, return at
// most Limit.
type PaginationFilter struct {
	Offset int32
	Limit  int32
}

// CompositeColumnValueFilter combines sub-filters with a logical operator.
type CompositeColumnValueFilter struct {
	Operator LogicalOperator
	Filters  []ColumnFilter
}
// Serialize returns the protobuf wire encoding of this composite filter.
// NOTE(review): the marshal error is dropped; a failure would yield empty
// filter bytes — confirm upstream validation makes this impossible.
func (ccvfilter *CompositeColumnValueFilter) Serialize() []byte {
	result, _ := proto.Marshal(ccvfilter.ToFilter())
	return result
}

// ToFilter wraps this composite filter in the generic otsprotocol.Filter
// envelope: a type tag plus the serialized payload.
func (ccvfilter *CompositeColumnValueFilter) ToFilter() *otsprotocol.Filter {
	compositefilter := NewCompositeFilter(ccvfilter.Filters, ccvfilter.Operator)
	compositeFilterToBytes, _ := proto.Marshal(compositefilter)
	filter := new(otsprotocol.Filter)
	filter.Type = otsprotocol.FilterType_FT_COMPOSITE_COLUMN_VALUE.Enum()
	filter.Filter = compositeFilterToBytes
	return filter
}

// AddFilter appends a sub-filter to the composition.
func (ccvfilter *CompositeColumnValueFilter) AddFilter(filter ColumnFilter) {
	ccvfilter.Filters = append(ccvfilter.Filters, filter)
}
// ToFilter wraps this single-column condition in the generic
// otsprotocol.Filter envelope (type tag + serialized payload).
func (condition *SingleColumnCondition) ToFilter() *otsprotocol.Filter {
	singlefilter := NewSingleColumnValueFilter(condition)
	singleFilterToBytes, _ := proto.Marshal(singlefilter)
	filter := new(otsprotocol.Filter)
	filter.Type = otsprotocol.FilterType_FT_SINGLE_COLUMN_VALUE.Enum()
	filter.Filter = singleFilterToBytes
	return filter
}

// Serialize returns the protobuf wire encoding of this condition.
// NOTE(review): the marshal error is dropped, as in the sibling filters.
func (condition *SingleColumnCondition) Serialize() []byte {
	result, _ := proto.Marshal(condition.ToFilter())
	return result
}
// ToFilter wraps this pagination filter in the generic otsprotocol.Filter
// envelope (type tag + serialized payload).
func (pageFilter *PaginationFilter) ToFilter() *otsprotocol.Filter {
	compositefilter := NewPaginationFilter(pageFilter)
	compositeFilterToBytes, _ := proto.Marshal(compositefilter)
	filter := new(otsprotocol.Filter)
	filter.Type = otsprotocol.FilterType_FT_COLUMN_PAGINATION.Enum()
	filter.Filter = compositeFilterToBytes
	return filter
}

// Serialize returns the protobuf wire encoding of this pagination filter.
// NOTE(review): the marshal error is dropped, as in the sibling filters.
func (pageFilter *PaginationFilter) Serialize() []byte {
	result, _ := proto.Marshal(pageFilter.ToFilter())
	return result
}
// NewTableOptionWithMaxVersion builds a TableOption with the given max
// version count and a TimeToAlive of -1 (rows never expire).
func NewTableOptionWithMaxVersion(maxVersion int) *TableOption {
	return &TableOption{
		TimeToAlive: -1,
		MaxVersion:  maxVersion,
	}
}
// NewTableOption builds a TableOption with explicit time-to-live and max
// version count.
func NewTableOption(timeToAlive int, maxVersion int) *TableOption {
	return &TableOption{
		TimeToAlive: timeToAlive,
		MaxVersion:  maxVersion,
	}
}
// RowCondition is the precondition attached to a write: a row-existence
// expectation plus an optional column-value condition.
type RowCondition struct {
	RowExistenceExpectation RowExistenceExpectation
	ColumnCondition         ColumnFilter
}

// PutRowChange describes a full-row write.
type PutRowChange struct {
	TableName     string
	PrimaryKey    *PrimaryKey
	Columns       []AttributeColumn
	Condition     *RowCondition
	ReturnType    ReturnType
	TransactionId *string // optional local-transaction id
}

// PutRowRequest wraps a single PutRowChange.
type PutRowRequest struct {
	PutRowChange *PutRowChange
}

// DeleteRowChange describes a row deletion.
type DeleteRowChange struct {
	TableName     string
	PrimaryKey    *PrimaryKey
	Condition     *RowCondition
	TransactionId *string // optional local-transaction id
}

// DeleteRowRequest wraps a single DeleteRowChange.
type DeleteRowRequest struct {
	DeleteRowChange *DeleteRowChange
}

// SingleRowQueryCriteria selects one row by primary key, with optional
// column projection, version limits, filter and column range.
type SingleRowQueryCriteria struct {
	ColumnsToGet  []string
	TableName     string
	PrimaryKey    *PrimaryKey
	MaxVersion    int32
	TimeRange     *TimeRange
	Filter        ColumnFilter
	StartColumn   *string
	EndColumn     *string
	TransactionId *string // optional local-transaction id
}

// UpdateRowChange describes a partial-row update.
type UpdateRowChange struct {
	TableName           string
	PrimaryKey          *PrimaryKey
	Columns             []ColumnToUpdate
	Condition           *RowCondition
	TransactionId       *string // optional local-transaction id
	ReturnType          ReturnType
	ColumnNamesToReturn []string
}

// UpdateRowRequest wraps a single UpdateRowChange.
type UpdateRowRequest struct {
	UpdateRowChange *UpdateRowChange
}
// AddColumnToGet appends columnName to the column projection.
func (rowQueryCriteria *SingleRowQueryCriteria) AddColumnToGet(columnName string) {
	rowQueryCriteria.ColumnsToGet = append(rowQueryCriteria.ColumnsToGet, columnName)
}

// SetStartColumn sets the start of the column range to read.
func (rowQueryCriteria *SingleRowQueryCriteria) SetStartColumn(columnName string) {
	rowQueryCriteria.StartColumn = &columnName
}

// SetEndtColumn sets the end of the column range to read.
// NOTE(review): the name carries a typo ("Endt") but is exported API —
// renaming would break callers; consider adding a correctly spelled alias.
func (rowQueryCriteria *SingleRowQueryCriteria) SetEndtColumn(columnName string) {
	rowQueryCriteria.EndColumn = &columnName
}

// getColumnsToGet returns the configured column projection.
func (rowQueryCriteria *SingleRowQueryCriteria) getColumnsToGet() []string {
	return rowQueryCriteria.ColumnsToGet
}

// AddColumnToGet appends columnName to the column projection.
func (rowQueryCriteria *MultiRowQueryCriteria) AddColumnToGet(columnName string) {
	rowQueryCriteria.ColumnsToGet = append(rowQueryCriteria.ColumnsToGet, columnName)
}

// AddColumnToGet appends columnName to the column projection.
func (rowQueryCriteria *RangeRowQueryCriteria) AddColumnToGet(columnName string) {
	rowQueryCriteria.ColumnsToGet = append(rowQueryCriteria.ColumnsToGet, columnName)
}

// AddRow appends a primary key to the multi-row (batch-get) criteria.
func (rowQueryCriteria *MultiRowQueryCriteria) AddRow(pk *PrimaryKey) {
	rowQueryCriteria.PrimaryKey = append(rowQueryCriteria.PrimaryKey, pk)
}
// GetRowRequest wraps the criteria for reading a single row.
type GetRowRequest struct {
	SingleRowQueryCriteria *SingleRowQueryCriteria
}

// MultiRowQueryCriteria selects several rows of one table for a batch get.
type MultiRowQueryCriteria struct {
	PrimaryKey   []*PrimaryKey
	ColumnsToGet []string
	TableName    string
	MaxVersion   int
	TimeRange    *TimeRange
	Filter       ColumnFilter
	StartColumn  *string
	EndColumn    *string
}

// BatchGetRowRequest groups per-table criteria for BatchGetRow.
type BatchGetRowRequest struct {
	MultiRowQueryCriteria []*MultiRowQueryCriteria
}

// ColumnMap indexes a row's attribute columns by column name;
// columnsKey caches the (unexported) key list.
type ColumnMap struct {
	Columns    map[string][]*AttributeColumn
	columnsKey []string
}

// GetRowResponse is the result of GetRow.
type GetRowResponse struct {
	PrimaryKey           PrimaryKey
	Columns              []*AttributeColumn
	ConsumedCapacityUnit *ConsumedCapacityUnit
	columnMap            *ColumnMap // lazily built name->columns index
	ResponseInfo
}

// Error is a per-row service error (code + message) inside batch results.
type Error struct {
	Code    string
	Message string
}

// RowResult is the per-row outcome of a batch operation.
type RowResult struct {
	TableName            string
	IsSucceed            bool
	Error                Error
	PrimaryKey           PrimaryKey
	Columns              []*AttributeColumn
	ConsumedCapacityUnit *ConsumedCapacityUnit
	Index                int32 // position of this row in the original request
}

// RowChange is implemented by every row mutation (put/update/delete) so
// they can be serialized uniformly into batch requests.
type RowChange interface {
	Serialize() []byte
	getOperationType() otsprotocol.OperationType
	getCondition() *otsprotocol.Condition
	GetTableName() string
}

// BatchGetRowResponse groups per-row results by table name.
type BatchGetRowResponse struct {
	TableToRowsResult map[string][]RowResult
	ResponseInfo
}

// BatchWriteRowRequest groups row mutations by table name.
type BatchWriteRowRequest struct {
	RowChangesGroupByTable map[string][]RowChange
}

// BatchWriteRowResponse groups per-row results by table name.
type BatchWriteRowResponse struct {
	TableToRowsResult map[string][]RowResult
	ResponseInfo
}

// Direction is the scan direction of a range read.
type Direction int32

const (
	FORWARD  Direction = 0
	BACKWARD Direction = 1
)

// RangeRowQueryCriteria selects a primary-key range to scan.
type RangeRowQueryCriteria struct {
	TableName       string
	StartPrimaryKey *PrimaryKey
	EndPrimaryKey   *PrimaryKey
	ColumnsToGet    []string
	MaxVersion      int32
	TimeRange       *TimeRange
	Filter          ColumnFilter
	Direction       Direction
	Limit           int32
	StartColumn     *string
	EndColumn       *string
	TransactionId   *string // optional local-transaction id
}

// GetRangeRequest wraps the criteria for a range read.
type GetRangeRequest struct {
	RangeRowQueryCriteria *RangeRowQueryCriteria
}

// Row is one row returned by a range read.
type Row struct {
	PrimaryKey *PrimaryKey
	Columns    []*AttributeColumn
}

// GetRangeResponse is the result of GetRange; NextStartPrimaryKey is nil
// when the range is exhausted, otherwise it is the cursor for the next call.
type GetRangeResponse struct {
	Rows                 []*Row
	ConsumedCapacityUnit *ConsumedCapacityUnit
	NextStartPrimaryKey  *PrimaryKey
	ResponseInfo
}
// ListStreamRequest optionally restricts the stream listing to one table.
type ListStreamRequest struct {
	TableName *string
}

// Stream is one change-data stream attached to a table.
type Stream struct {
	Id           *StreamId
	TableName    *string
	CreationTime int64
}

// ListStreamResponse is the result of ListStream.
type ListStreamResponse struct {
	Streams []Stream
	ResponseInfo
}

// StreamSpecification enables/disables a table's change stream.
type StreamSpecification struct {
	EnableStream   bool
	ExpirationTime int32 // must be positive. in hours
}

// StreamDetails describes a table's stream state.
type StreamDetails struct {
	EnableStream   bool
	StreamId       *StreamId // nil when stream is disabled.
	ExpirationTime int32     // in hours
	LastEnableTime int64     // the last time stream is enabled, in usec
}

// DescribeStreamRequest asks for a stream's shard layout, with optional
// paging over shards.
type DescribeStreamRequest struct {
	StreamId              *StreamId // required
	InclusiveStartShardId *ShardId  // optional
	ShardLimit            *int32    // optional
}

// DescribeStreamResponse is the result of DescribeStream.
type DescribeStreamResponse struct {
	StreamId       *StreamId // required
	ExpirationTime int32     // in hours
	TableName      *string   // required
	CreationTime   int64     // in usec
	Status         StreamStatus // required
	Shards         []*StreamShard
	NextShardId    *ShardId // optional. nil means "no more shards"
	ResponseInfo
}

// GetShardIteratorRequest asks for an iterator into one shard, positioned
// by timestamp or continuation token.
type GetShardIteratorRequest struct {
	StreamId  *StreamId // required
	ShardId   *ShardId  // required
	Timestamp *int64
	Token     *string
}

// GetShardIteratorResponse is the result of GetShardIterator.
type GetShardIteratorResponse struct {
	ShardIterator *ShardIterator // required
	Token         *string
	ResponseInfo
}

// GetStreamRecordRequest reads records from a shard iterator.
type GetStreamRecordRequest struct {
	ShardIterator *ShardIterator // required
	Limit         *int32         // optional. max records which will reside in response
}

// GetStreamRecordResponse is the result of GetStreamRecord.
type GetStreamRecordResponse struct {
	Records           []*StreamRecord
	NextShardIterator *ShardIterator // optional. an indicator to be used to read more records in this shard
	ResponseInfo
}

// ComputeSplitPointsBySizeRequest splits a table into roughly SplitSize-d
// partitions for parallel scanning.
type ComputeSplitPointsBySizeRequest struct {
	TableName string
	SplitSize int64
}

// ComputeSplitPointsBySizeResponse is the result of ComputeSplitPointsBySize.
type ComputeSplitPointsBySizeResponse struct {
	SchemaEntry []*PrimaryKeySchema
	Splits      []*Split
	ResponseInfo
}

// Split is one primary-key range plus the server location that hosts it.
type Split struct {
	LowerBound *PrimaryKey
	UpperBound *PrimaryKey
	Location   string
}

// Opaque stream identifiers.
type StreamId string
type ShardId string
type ShardIterator string

// StreamStatus is the lifecycle state of a stream.
type StreamStatus int

const (
	SS_Enabling StreamStatus = iota
	SS_Active
)
/*
 * Shards are possibly splitted into two or merged from two.
 * After splitting, both newly generated shards have the same FatherShard.
 * After merging, the newly generated shard have both FatherShard and MotherShard.
 */
type StreamShard struct {
	SelfShard   *ShardId // required
	FatherShard *ShardId // optional
	MotherShard *ShardId // optional
}

// StreamRecord is one change record read from a stream shard: the action
// type, its sequence info, the row's primary key and the changed columns.
type StreamRecord struct {
	Type       ActionType
	Info       *RecordSequenceInfo // required
	PrimaryKey *PrimaryKey         // required
	Columns    []*RecordColumn
}
// String renders the record as a JSON-like object for logging.
// NOTE(review): *this.PrimaryKey is formatted with %s but PrimaryKey has
// no String method visible in this file, so fmt falls back to its default
// struct formatting — confirm that output is acceptable.
func (this *StreamRecord) String() string {
	return fmt.Sprintf(
		"{\"Type\":%s, \"PrimaryKey\":%s, \"Info\":%s, \"Columns\":%s}",
		this.Type,
		*this.PrimaryKey,
		this.Info,
		this.Columns)
}
// ActionType is the kind of change a stream record represents.
type ActionType int

const (
	AT_Put ActionType = iota
	AT_Update
	AT_Delete
)

// String returns the action as a quoted JSON-style name; it panics on an
// unknown value, which indicates a programmer error.
func (a ActionType) String() string {
	switch a {
	case AT_Put:
		return `"PutRow"`
	case AT_Update:
		return `"UpdateRow"`
	case AT_Delete:
		return `"DeleteRow"`
	}
	panic(fmt.Sprintf("unknown action type: %d", int(a)))
}
// RecordSequenceInfo orders stream records: epoch, timestamp and the row's
// index within that timestamp.
type RecordSequenceInfo struct {
	Epoch     int32
	Timestamp int64
	RowIndex  int32
}

// String renders the sequence info as a JSON-like object for logging.
func (info *RecordSequenceInfo) String() string {
	return fmt.Sprintf("{\"Epoch\":%d, \"Timestamp\": %d, \"RowIndex\": %d}",
		info.Epoch, info.Timestamp, info.RowIndex)
}
// RecordColumn is one column change inside a stream record.
type RecordColumn struct {
	Type      RecordColumnType
	Name      *string     // required
	Value     interface{} // optional. present when Type is RCT_Put
	Timestamp *int64      // optional, in msec. present when Type is RCT_Put or RCT_DeleteOneVersion
}

// String renders the column change as a JSON-like object; which fields are
// printed depends on the change type.
func (rc *RecordColumn) String() string {
	parts := []string{fmt.Sprintf("\"Name\":%s", strconv.Quote(*rc.Name))}
	switch rc.Type {
	case RCT_DeleteAllVersions:
		parts = append(parts, `"Type":"DeleteAllVersions"`)
	case RCT_DeleteOneVersion:
		parts = append(parts,
			`"Type":"DeleteOneVersion"`,
			fmt.Sprintf("\"Timestamp\":%d", *rc.Timestamp))
	case RCT_Put:
		parts = append(parts,
			`"Type":"Put"`,
			fmt.Sprintf("\"Timestamp\":%d", *rc.Timestamp),
			fmt.Sprintf("\"Value\":%s", rc.Value))
	}
	return "{" + strings.Join(parts, ", ") + "}"
}

// RecordColumnType is the kind of column change.
type RecordColumnType int

const (
	RCT_Put RecordColumnType = iota
	RCT_DeleteOneVersion
	RCT_DeleteAllVersions
)
// IndexMeta describes a secondary index: its name, indexed primary-key
// columns, projected defined columns and index type.
type IndexMeta struct {
	IndexName      string
	Primarykey     []string
	DefinedColumns []string
	IndexType      IndexType
}

// DefinedColumnSchema is one predefined (typed) attribute column.
type DefinedColumnSchema struct {
	Name       string
	ColumnType DefinedColumnType
}

// IndexType distinguishes global from local secondary indexes.
type IndexType int32

const (
	IT_GLOBAL_INDEX IndexType = 1
	IT_LOCAL_INDEX  IndexType = 2
)

// DefinedColumnType is the value type of a predefined column.
type DefinedColumnType int32

const (
	/**
	 * 64-bit integer.
	 */
	DefinedColumn_INTEGER DefinedColumnType = 1
	/**
	 * Floating-point number.
	 */
	DefinedColumn_DOUBLE DefinedColumnType = 2
	/**
	 * Boolean.
	 */
	DefinedColumn_BOOLEAN DefinedColumnType = 3
	/**
	 * String.
	 */
	DefinedColumn_STRING DefinedColumnType = 4
	/**
	 * BINARY
	 */
	DefinedColumn_BINARY DefinedColumnType = 5
)

// StartLocalTransactionRequest opens a local transaction on the partition
// identified by PrimaryKey.
type StartLocalTransactionRequest struct {
	PrimaryKey *PrimaryKey
	TableName  string
}

// StartLocalTransactionResponse returns the new transaction's id.
type StartLocalTransactionResponse struct {
	TransactionId *string
	ResponseInfo
}

// CommitTransactionRequest commits the given local transaction.
type CommitTransactionRequest struct {
	TransactionId *string
}

// CommitTransactionResponse is the (empty) result of a commit.
type CommitTransactionResponse struct {
	ResponseInfo
}

// AbortTransactionRequest aborts the given local transaction.
type AbortTransactionRequest struct {
	TransactionId *string
}

// AbortTransactionResponse is the (empty) result of an abort.
type AbortTransactionResponse struct {
	ResponseInfo
}

View File

@ -0,0 +1,124 @@
package tablestore
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"hash"
"sort"
"strings"
)
// Canonical x-ots-* request header names used for signing.
const (
	xOtsDate                = "x-ots-date"
	xOtsApiversion          = "x-ots-apiversion"
	xOtsAccesskeyid         = "x-ots-accesskeyid"
	xOtsContentmd5          = "x-ots-contentmd5"
	xOtsHeaderStsToken      = "x-ots-ststoken"
	xOtsSignature           = "x-ots-signature"
	xOtsRequestCompressType = "x-ots-request-compress-type"
	xOtsRequestCompressSize = "x-ots-request-compress-size"
	xOtsResponseCompressTye = "x-ots-response-compress-type" // NOTE(review): identifier typo "Tye"; unexported, rename is safe within this package
)

// otsHeader is one request header; must marks headers that have to be
// present before a signature can be computed.
type otsHeader struct {
	name  string
	value string
	must  bool
}

// otsHeaders is the signable header set plus the HMAC-SHA1 state keyed by
// the access-key secret. It implements sort.Interface (see Len/Swap/Less).
type otsHeaders struct {
	headers  []*otsHeader
	hmacSha1 hash.Hash
}
// createOtsHeaders registers the known x-ots-* headers (flagging which are
// mandatory), sorts them for signing, and seeds the HMAC-SHA1 signer with
// the access-key secret.
func createOtsHeaders(accessKey string) *otsHeaders {
	specs := []struct {
		name string
		must bool
	}{
		{xOtsDate, true},
		{xOtsApiversion, true},
		{xOtsAccesskeyid, true},
		{xOtsContentmd5, true},
		{xOtsInstanceName, true},
		{xOtsSignature, true},
		{xOtsRequestCompressSize, false},
		{xOtsResponseCompressTye, false},
		{xOtsRequestCompressType, false},
		{xOtsHeaderStsToken, false},
	}
	h := &otsHeaders{
		headers:  make([]*otsHeader, 0, len(specs)),
		hmacSha1: hmac.New(sha1.New, []byte(accessKey)),
	}
	for _, s := range specs {
		h.headers = append(h.headers, &otsHeader{name: s.name, must: s.must})
	}
	sort.Sort(h)
	return h
}
// Len implements sort.Interface.
func (h *otsHeaders) Len() int { return len(h.headers) }

// Swap implements sort.Interface.
func (h *otsHeaders) Swap(i, j int) {
	h.headers[i], h.headers[j] = h.headers[j], h.headers[i]
}

// Less orders headers alphabetically by name, except that xOtsSignature is
// always forced to sort last (the signing code relies on this).
func (h *otsHeaders) Less(i, j int) bool {
	a, b := h.headers[i].name, h.headers[j].name
	switch {
	case a == xOtsSignature:
		return false
	case b == xOtsSignature:
		return true
	default:
		return a < b
	}
}
// search returns the registered header with exactly the given name, or nil
// if no such header exists.
//
// The previous implementation ran sort.Search over h.headers[:len-1] and
// returned whatever entry the binary search landed on, without comparing
// names. That was doubly unsafe: (1) Less forces xOtsSignature to sort
// last even though it is not the alphabetically greatest name, so the
// slice is not fully ordered by name and the binary search could land on
// the wrong entry (looking up xOtsSignature found the x-ots-ststoken
// header instead); (2) lookups of unregistered names silently returned an
// arbitrary neighbouring header, which set() would then overwrite. A
// linear scan over the ten known headers is both correct and cheap.
func (h *otsHeaders) search(name string) *otsHeader {
	for _, header := range h.headers {
		if header.name == name {
			return header
		}
	}
	return nil
}
// set stores value on the header registered under name; names that cannot
// be resolved are silently ignored.
// NOTE(review): search does not verify an exact name match, so an
// unregistered name may resolve to a neighbouring header and overwrite its
// value — confirm callers only pass the registered x-ots-* names.
func (h *otsHeaders) set(name, value string) {
	header := h.search(name)
	if header == nil {
		return
	}
	header.value = value
}
// signature computes the OTS request signature: it validates that every
// mandatory header has a value, builds the canonical string-to-sign from
// the URI, HTTP method and the sorted non-signature headers, HMAC-SHA1s it
// with the access-key secret, base64-encodes the digest, stores it under
// xOtsSignature and returns it.
func (h *otsHeaders) signature(uri, method, accessKey string) (string, error) {
	// The last entry is xOtsSignature (forced there by Less); it is
	// excluded both from validation and from the string-to-sign.
	for _, header := range h.headers[:len(h.headers)-1] {
		if header.must && header.value == "" {
			return "", errMissMustHeader(header.name)
		}
	}
	// StringToSign = CanonicalURI + '\n' + HTTPRequestMethod + '\n' + CanonicalQueryString + '\n' + CanonicalHeaders + '\n'
	// TODO: CanonicalQueryString is empty for now.
	stringToSign := uri + "\n" + method + "\n" + "\n"
	// The last header is xOtsSignature; only non-empty headers are signed.
	for _, header := range h.headers[:len(h.headers)-1] {
		if header.value != "" {
			stringToSign = stringToSign + header.name + ":" + strings.TrimSpace(header.value) + "\n"
		}
	}
	h.hmacSha1.Reset()
	h.hmacSha1.Write([]byte(stringToSign))
	// fmt.Println("stringToSign:" + stringToSign)
	sign := base64.StdEncoding.EncodeToString(h.hmacSha1.Sum(nil))
	h.set(xOtsSignature, sign)
	// fmt.Println("sign:" + sign)
	return sign, nil
}

View File

@ -0,0 +1 @@
# Regenerate the Go protobuf bindings for the tablestore protocol schemas.
protoc --go_out=. search.proto ots_filter.proto table_store.proto

View File

@ -0,0 +1,390 @@
// Code generated by protoc-gen-go.
// source: ots_filter.proto
// DO NOT EDIT!
package otsprotocol
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type VariantType int32
const (
VariantType_VT_INTEGER VariantType = 0
VariantType_VT_DOUBLE VariantType = 1
// VT_BOOLEAN = 2;
VariantType_VT_STRING VariantType = 3
VariantType_VT_NULL VariantType = 6
VariantType_VT_BLOB VariantType = 7
)
var VariantType_name = map[int32]string{
0: "VT_INTEGER",
1: "VT_DOUBLE",
3: "VT_STRING",
6: "VT_NULL",
7: "VT_BLOB",
}
var VariantType_value = map[string]int32{
"VT_INTEGER": 0,
"VT_DOUBLE": 1,
"VT_STRING": 3,
"VT_NULL": 6,
"VT_BLOB": 7,
}
func (x VariantType) Enum() *VariantType {
p := new(VariantType)
*p = x
return p
}
func (x VariantType) String() string {
return proto.EnumName(VariantType_name, int32(x))
}
func (x *VariantType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(VariantType_value, data, "VariantType")
if err != nil {
return err
}
*x = VariantType(value)
return nil
}
func (VariantType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
type FilterType int32
const (
FilterType_FT_SINGLE_COLUMN_VALUE FilterType = 1
FilterType_FT_COMPOSITE_COLUMN_VALUE FilterType = 2
FilterType_FT_COLUMN_PAGINATION FilterType = 3
)
var FilterType_name = map[int32]string{
1: "FT_SINGLE_COLUMN_VALUE",
2: "FT_COMPOSITE_COLUMN_VALUE",
3: "FT_COLUMN_PAGINATION",
}
var FilterType_value = map[string]int32{
"FT_SINGLE_COLUMN_VALUE": 1,
"FT_COMPOSITE_COLUMN_VALUE": 2,
"FT_COLUMN_PAGINATION": 3,
}
func (x FilterType) Enum() *FilterType {
p := new(FilterType)
*p = x
return p
}
func (x FilterType) String() string {
return proto.EnumName(FilterType_name, int32(x))
}
func (x *FilterType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FilterType_value, data, "FilterType")
if err != nil {
return err
}
*x = FilterType(value)
return nil
}
func (FilterType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
type ComparatorType int32
const (
ComparatorType_CT_EQUAL ComparatorType = 1
ComparatorType_CT_NOT_EQUAL ComparatorType = 2
ComparatorType_CT_GREATER_THAN ComparatorType = 3
ComparatorType_CT_GREATER_EQUAL ComparatorType = 4
ComparatorType_CT_LESS_THAN ComparatorType = 5
ComparatorType_CT_LESS_EQUAL ComparatorType = 6
)
var ComparatorType_name = map[int32]string{
1: "CT_EQUAL",
2: "CT_NOT_EQUAL",
3: "CT_GREATER_THAN",
4: "CT_GREATER_EQUAL",
5: "CT_LESS_THAN",
6: "CT_LESS_EQUAL",
}
var ComparatorType_value = map[string]int32{
"CT_EQUAL": 1,
"CT_NOT_EQUAL": 2,
"CT_GREATER_THAN": 3,
"CT_GREATER_EQUAL": 4,
"CT_LESS_THAN": 5,
"CT_LESS_EQUAL": 6,
}
func (x ComparatorType) Enum() *ComparatorType {
p := new(ComparatorType)
*p = x
return p
}
func (x ComparatorType) String() string {
return proto.EnumName(ComparatorType_name, int32(x))
}
func (x *ComparatorType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(ComparatorType_value, data, "ComparatorType")
if err != nil {
return err
}
*x = ComparatorType(value)
return nil
}
func (ComparatorType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
type LogicalOperator int32
const (
LogicalOperator_LO_NOT LogicalOperator = 1
LogicalOperator_LO_AND LogicalOperator = 2
LogicalOperator_LO_OR LogicalOperator = 3
)
var LogicalOperator_name = map[int32]string{
1: "LO_NOT",
2: "LO_AND",
3: "LO_OR",
}
var LogicalOperator_value = map[string]int32{
"LO_NOT": 1,
"LO_AND": 2,
"LO_OR": 3,
}
func (x LogicalOperator) Enum() *LogicalOperator {
p := new(LogicalOperator)
*p = x
return p
}
func (x LogicalOperator) String() string {
return proto.EnumName(LogicalOperator_name, int32(x))
}
func (x *LogicalOperator) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(LogicalOperator_value, data, "LogicalOperator")
if err != nil {
return err
}
*x = LogicalOperator(value)
return nil
}
func (LogicalOperator) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
type ValueTransferRule struct {
Regex *string `protobuf:"bytes,1,req,name=regex" json:"regex,omitempty"`
CastType *VariantType `protobuf:"varint,2,opt,name=cast_type,enum=otsprotocol.VariantType" json:"cast_type,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *ValueTransferRule) Reset() { *m = ValueTransferRule{} }
func (m *ValueTransferRule) String() string { return proto.CompactTextString(m) }
func (*ValueTransferRule) ProtoMessage() {}
func (*ValueTransferRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
func (m *ValueTransferRule) GetRegex() string {
if m != nil && m.Regex != nil {
return *m.Regex
}
return ""
}
func (m *ValueTransferRule) GetCastType() VariantType {
if m != nil && m.CastType != nil {
return *m.CastType
}
return VariantType_VT_INTEGER
}
type SingleColumnValueFilter struct {
Comparator *ComparatorType `protobuf:"varint,1,req,name=comparator,enum=otsprotocol.ComparatorType" json:"comparator,omitempty"`
ColumnName *string `protobuf:"bytes,2,req,name=column_name" json:"column_name,omitempty"`
ColumnValue []byte `protobuf:"bytes,3,req,name=column_value" json:"column_value,omitempty"`
FilterIfMissing *bool `protobuf:"varint,4,req,name=filter_if_missing" json:"filter_if_missing,omitempty"`
LatestVersionOnly *bool `protobuf:"varint,5,req,name=latest_version_only" json:"latest_version_only,omitempty"`
ValueTransRule *ValueTransferRule `protobuf:"bytes,6,opt,name=value_trans_rule" json:"value_trans_rule,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *SingleColumnValueFilter) Reset() { *m = SingleColumnValueFilter{} }
func (m *SingleColumnValueFilter) String() string { return proto.CompactTextString(m) }
func (*SingleColumnValueFilter) ProtoMessage() {}
func (*SingleColumnValueFilter) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
func (m *SingleColumnValueFilter) GetComparator() ComparatorType {
if m != nil && m.Comparator != nil {
return *m.Comparator
}
return ComparatorType_CT_EQUAL
}
func (m *SingleColumnValueFilter) GetColumnName() string {
if m != nil && m.ColumnName != nil {
return *m.ColumnName
}
return ""
}
func (m *SingleColumnValueFilter) GetColumnValue() []byte {
if m != nil {
return m.ColumnValue
}
return nil
}
func (m *SingleColumnValueFilter) GetFilterIfMissing() bool {
if m != nil && m.FilterIfMissing != nil {
return *m.FilterIfMissing
}
return false
}
func (m *SingleColumnValueFilter) GetLatestVersionOnly() bool {
if m != nil && m.LatestVersionOnly != nil {
return *m.LatestVersionOnly
}
return false
}
func (m *SingleColumnValueFilter) GetValueTransRule() *ValueTransferRule {
if m != nil {
return m.ValueTransRule
}
return nil
}
type CompositeColumnValueFilter struct {
Combinator *LogicalOperator `protobuf:"varint,1,req,name=combinator,enum=otsprotocol.LogicalOperator" json:"combinator,omitempty"`
SubFilters []*Filter `protobuf:"bytes,2,rep,name=sub_filters" json:"sub_filters,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CompositeColumnValueFilter) Reset() { *m = CompositeColumnValueFilter{} }
func (m *CompositeColumnValueFilter) String() string { return proto.CompactTextString(m) }
func (*CompositeColumnValueFilter) ProtoMessage() {}
func (*CompositeColumnValueFilter) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
func (m *CompositeColumnValueFilter) GetCombinator() LogicalOperator {
if m != nil && m.Combinator != nil {
return *m.Combinator
}
return LogicalOperator_LO_NOT
}
func (m *CompositeColumnValueFilter) GetSubFilters() []*Filter {
if m != nil {
return m.SubFilters
}
return nil
}
type ColumnPaginationFilter struct {
Offset *int32 `protobuf:"varint,1,req,name=offset" json:"offset,omitempty"`
Limit *int32 `protobuf:"varint,2,req,name=limit" json:"limit,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *ColumnPaginationFilter) Reset() { *m = ColumnPaginationFilter{} }
func (m *ColumnPaginationFilter) String() string { return proto.CompactTextString(m) }
func (*ColumnPaginationFilter) ProtoMessage() {}
func (*ColumnPaginationFilter) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
func (m *ColumnPaginationFilter) GetOffset() int32 {
if m != nil && m.Offset != nil {
return *m.Offset
}
return 0
}
func (m *ColumnPaginationFilter) GetLimit() int32 {
if m != nil && m.Limit != nil {
return *m.Limit
}
return 0
}
type Filter struct {
Type *FilterType `protobuf:"varint,1,req,name=type,enum=otsprotocol.FilterType" json:"type,omitempty"`
Filter []byte `protobuf:"bytes,2,req,name=filter" json:"filter,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Filter) Reset() { *m = Filter{} }
func (m *Filter) String() string { return proto.CompactTextString(m) }
func (*Filter) ProtoMessage() {}
func (*Filter) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} }
func (m *Filter) GetType() FilterType {
if m != nil && m.Type != nil {
return *m.Type
}
return FilterType_FT_SINGLE_COLUMN_VALUE
}
func (m *Filter) GetFilter() []byte {
if m != nil {
return m.Filter
}
return nil
}
func init() {
proto.RegisterType((*ValueTransferRule)(nil), "otsprotocol.ValueTransferRule")
proto.RegisterType((*SingleColumnValueFilter)(nil), "otsprotocol.SingleColumnValueFilter")
proto.RegisterType((*CompositeColumnValueFilter)(nil), "otsprotocol.CompositeColumnValueFilter")
proto.RegisterType((*ColumnPaginationFilter)(nil), "otsprotocol.ColumnPaginationFilter")
proto.RegisterType((*Filter)(nil), "otsprotocol.Filter")
proto.RegisterEnum("otsprotocol.VariantType", VariantType_name, VariantType_value)
proto.RegisterEnum("otsprotocol.FilterType", FilterType_name, FilterType_value)
proto.RegisterEnum("otsprotocol.ComparatorType", ComparatorType_name, ComparatorType_value)
proto.RegisterEnum("otsprotocol.LogicalOperator", LogicalOperator_name, LogicalOperator_value)
}
func init() { proto.RegisterFile("ots_filter.proto", fileDescriptor1) }
var fileDescriptor1 = []byte{
// 585 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x92, 0x51, 0x6b, 0xdb, 0x30,
0x14, 0x85, 0x67, 0xa7, 0x71, 0x9b, 0xeb, 0x34, 0x55, 0x95, 0xd2, 0xba, 0xed, 0x36, 0x42, 0x60,
0x60, 0x32, 0xe8, 0x46, 0x18, 0x6c, 0x6f, 0xc3, 0x75, 0xdd, 0x2c, 0xe0, 0xda, 0x5d, 0xa2, 0xf8,
0x55, 0xb8, 0x41, 0x09, 0x02, 0xc7, 0x0a, 0x96, 0x52, 0xda, 0xb7, 0xfd, 0xdb, 0xfd, 0x8d, 0x61,
0xd9, 0x1d, 0x69, 0xe8, 0x9b, 0x75, 0xef, 0xd5, 0x77, 0xee, 0xd1, 0x31, 0x20, 0xa1, 0x24, 0x5d,
0xf0, 0x4c, 0xb1, 0xe2, 0x6a, 0x5d, 0x08, 0x25, 0xb0, 0x2d, 0x94, 0xd4, 0x5f, 0x73, 0x91, 0xf5,
0x63, 0x38, 0x4e, 0xd2, 0x6c, 0xc3, 0x48, 0x91, 0xe6, 0x72, 0xc1, 0x8a, 0xc9, 0x26, 0x63, 0xf8,
0x10, 0x9a, 0x05, 0x5b, 0xb2, 0x27, 0xc7, 0xe8, 0x99, 0x6e, 0x0b, 0x7f, 0x86, 0xd6, 0x3c, 0x95,
0x8a, 0xaa, 0xe7, 0x35, 0x73, 0xcc, 0x9e, 0xe1, 0x76, 0x86, 0xce, 0xd5, 0x16, 0xe4, 0x2a, 0x49,
0x0b, 0x9e, 0xe6, 0x8a, 0x3c, 0xaf, 0x59, 0xff, 0xaf, 0x01, 0x67, 0x53, 0x9e, 0x2f, 0x33, 0xe6,
0x8b, 0x6c, 0xb3, 0xca, 0x35, 0xfd, 0x56, 0xeb, 0xe3, 0x2f, 0x00, 0x73, 0xb1, 0x5a, 0xa7, 0x45,
0xaa, 0x44, 0xa1, 0xe1, 0x9d, 0xe1, 0xe5, 0x2b, 0x92, 0xff, 0xbf, 0x5d, 0xc2, 0x70, 0x17, 0xec,
0xb9, 0xa6, 0xd0, 0x3c, 0x5d, 0x95, 0xda, 0xe5, 0x3a, 0x27, 0xd0, 0xae, 0x8b, 0x8f, 0x25, 0xdb,
0x69, 0xf4, 0x4c, 0xb7, 0x8d, 0xcf, 0xe1, 0xb8, 0x72, 0x49, 0xf9, 0x82, 0xae, 0xb8, 0x94, 0x3c,
0x5f, 0x3a, 0x7b, 0x3d, 0xd3, 0x3d, 0xc0, 0x97, 0xd0, 0xcd, 0x52, 0xc5, 0xa4, 0xa2, 0x8f, 0xac,
0x90, 0x5c, 0xe4, 0x54, 0xe4, 0xd9, 0xb3, 0xd3, 0xd4, 0xcd, 0x1f, 0x80, 0x34, 0x86, 0xaa, 0xf2,
0x05, 0x68, 0xb1, 0xc9, 0x98, 0x63, 0xf5, 0x0c, 0xd7, 0x1e, 0x7e, 0xdc, 0xf1, 0xb8, 0xf3, 0x4a,
0xfd, 0x27, 0xb8, 0x28, 0xd7, 0x15, 0x92, 0xab, 0x37, 0xbc, 0x7e, 0xd5, 0x5e, 0x1f, 0x78, 0xbe,
0xe5, 0xf5, 0xfd, 0x2b, 0x62, 0x28, 0x96, 0x7c, 0x9e, 0x66, 0xf1, 0x9a, 0x69, 0xc3, 0xd8, 0x05,
0x5b, 0x6e, 0x1e, 0xea, 0xac, 0xa4, 0x63, 0xf6, 0x1a, 0xae, 0x3d, 0xec, 0xbe, 0xba, 0x52, 0xb1,
0xfb, 0xdf, 0xe1, 0xb4, 0x12, 0xbc, 0x4f, 0x97, 0xa5, 0x00, 0x17, 0x79, 0xad, 0xda, 0x01, 0x4b,
0x2c, 0x16, 0x92, 0x29, 0xad, 0xd8, 0x2c, 0x93, 0xcc, 0xf8, 0x8a, 0x2b, 0xfd, 0x74, 0xcd, 0xfe,
0x4f, 0xb0, 0xea, 0xc1, 0x4f, 0xb0, 0xa7, 0xe3, 0xac, 0x16, 0x3b, 0x7b, 0x43, 0x45, 0x07, 0xd0,
0x01, 0xab, 0xda, 0x47, 0x03, 0xda, 0x83, 0x19, 0xd8, 0x5b, 0x61, 0xe3, 0x0e, 0x40, 0x42, 0xe8,
0x38, 0x22, 0xc1, 0x28, 0x98, 0xa0, 0x77, 0xf8, 0x10, 0x5a, 0x09, 0xa1, 0x37, 0xf1, 0xec, 0x3a,
0x0c, 0x90, 0x51, 0x1f, 0xa7, 0x64, 0x32, 0x8e, 0x46, 0xa8, 0x81, 0x6d, 0xd8, 0x4f, 0x08, 0x8d,
0x66, 0x61, 0x88, 0xac, 0xfa, 0x70, 0x1d, 0xc6, 0xd7, 0x68, 0x7f, 0x90, 0x02, 0x6c, 0x89, 0x5e,
0xc0, 0xe9, 0x2d, 0xa1, 0xd3, 0x71, 0x34, 0x0a, 0x03, 0xea, 0xc7, 0xe1, 0xec, 0x2e, 0xa2, 0x89,
0x17, 0xce, 0x4a, 0xe4, 0x07, 0x38, 0xbf, 0x25, 0xd4, 0x8f, 0xef, 0xee, 0xe3, 0xe9, 0x98, 0xec,
0xb4, 0x4d, 0xec, 0xc0, 0x89, 0x6e, 0xeb, 0xe2, 0xbd, 0x37, 0x1a, 0x47, 0x1e, 0x19, 0xc7, 0x11,
0x6a, 0x0c, 0xfe, 0x18, 0xd0, 0xd9, 0xf9, 0xbb, 0xda, 0x70, 0xe0, 0x13, 0x1a, 0xfc, 0x9e, 0x79,
0x21, 0x32, 0x30, 0x82, 0xb6, 0x4f, 0x68, 0x14, 0xbf, 0x54, 0x4c, 0xdc, 0x85, 0x23, 0x9f, 0xd0,
0xd1, 0x24, 0xf0, 0x48, 0x30, 0xa1, 0xe4, 0x97, 0x17, 0xa1, 0x06, 0x3e, 0x01, 0xb4, 0x55, 0xac,
0x46, 0xf7, 0xea, 0xcb, 0x61, 0x30, 0x9d, 0x56, 0x73, 0x4d, 0x7c, 0x0c, 0x87, 0x2f, 0x95, 0x6a,
0xc8, 0x1a, 0x7c, 0x83, 0xa3, 0xdd, 0xcc, 0x01, 0xac, 0x30, 0x2e, 0x45, 0x91, 0x51, 0x7f, 0x7b,
0xd1, 0x0d, 0x32, 0x71, 0x0b, 0x9a, 0x61, 0x4c, 0xe3, 0x09, 0x6a, 0xfc, 0x0b, 0x00, 0x00, 0xff,
0xff, 0xb3, 0x10, 0x19, 0xa7, 0xc1, 0x03, 0x00, 0x00,
}

View File

@ -0,0 +1,61 @@
syntax = "proto2";
package otsprotocol;
// VariantType tags the runtime type of a serialized variant value.
enum VariantType {
    VT_INTEGER = 0;
    VT_DOUBLE = 1;
    //VT_BOOLEAN = 2;
    VT_STRING = 3;
    VT_NULL = 6;
    VT_BLOB = 7;
}

// ValueTransferRule extracts a value via a regex and casts the match to
// cast_type before comparison.
message ValueTransferRule {
    required string regex = 1;
    optional VariantType cast_type = 2;
}

// FilterType tags the concrete filter carried in a Filter envelope.
enum FilterType {
    FT_SINGLE_COLUMN_VALUE = 1;
    FT_COMPOSITE_COLUMN_VALUE = 2;
    FT_COLUMN_PAGINATION = 3;
}

// ComparatorType is the comparison operator of a single-column filter.
enum ComparatorType {
    CT_EQUAL = 1;
    CT_NOT_EQUAL = 2;
    CT_GREATER_THAN = 3;
    CT_GREATER_EQUAL = 4;
    CT_LESS_THAN = 5;
    CT_LESS_EQUAL = 6;
}

// SingleColumnValueFilter filters rows by comparing one column's value.
message SingleColumnValueFilter {
    required ComparatorType comparator = 1;
    required string column_name = 2;
    required bytes column_value = 3; // Serialized SQLVariant
    required bool filter_if_missing = 4;
    required bool latest_version_only = 5;
    optional ValueTransferRule value_trans_rule = 6;
}

// LogicalOperator combines sub-filters in a composite filter.
enum LogicalOperator {
    LO_NOT = 1;
    LO_AND = 2;
    LO_OR = 3;
}

// CompositeColumnValueFilter combines sub-filters with a logical operator.
message CompositeColumnValueFilter {
    required LogicalOperator combinator = 1;
    repeated Filter sub_filters = 2;
}

// ColumnPaginationFilter returns a slice of a row's columns.
message ColumnPaginationFilter {
    required int32 offset = 1;
    required int32 limit = 2;
}

// Filter is the generic envelope: a type tag plus the serialized filter of
// that type.
message Filter {
    required FilterType type = 1;
    required bytes filter = 2; // Serialized string of filter of the type
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,323 @@
syntax = "proto2";
package otsprotocol;
enum QueryType {
MATCH_QUERY = 1;
MATCH_PHRASE_QUERY = 2;
TERM_QUERY = 3;
RANGE_QUERY = 4;
PREFIX_QUERY = 5;
BOOL_QUERY = 6;
CONST_SCORE_QUERY = 7;
FUNCTION_SCORE_QUERY = 8;
NESTED_QUERY = 9;
WILDCARD_QUERY = 10;
MATCH_ALL_QUERY = 11;
GEO_BOUNDING_BOX_QUERY = 12;
GEO_DISTANCE_QUERY = 13;
GEO_POLYGON_QUERY = 14;
TERMS_QUERY = 15;
}
enum QueryOperator {
OR = 1;
AND = 2;
}
message MatchQuery {
optional string field_name = 1;
optional string text = 2;
optional int32 minimum_should_match = 3;
optional QueryOperator operator = 4;
}
message MatchPhraseQuery {
optional string field_name = 1;
optional string text = 2;
}
message MatchAllQuery {
}
message TermQuery {
optional string field_name = 1;
optional bytes term = 2;
}
message TermsQuery {
optional string field_name = 1;
repeated bytes terms = 2;
}
message RangeQuery {
optional string field_name = 1;
optional bytes range_from = 2; // variant value
optional bytes range_to = 3; // variant value
optional bool include_lower = 4;
optional bool include_upper = 5;
}
message PrefixQuery {
optional string field_name = 1;
optional string prefix = 2;
}
message WildcardQuery {
optional string field_name = 1;
optional string value = 2;
}
message BoolQuery {
repeated Query must_queries = 1;
repeated Query must_not_queries = 2;
repeated Query filter_queries = 3;
repeated Query should_queries = 4;
optional int32 minimum_should_match = 5;
}
message ConstScoreQuery {
optional Query filter = 1;
}
message FieldValueFactor {
optional string field_name = 1;
}
message FunctionScoreQuery {
optional Query query = 1;
optional FieldValueFactor field_value_factor = 2;
}
enum ScoreMode {
SCORE_MODE_NONE = 1;
SCORE_MODE_AVG = 2;
SCORE_MODE_MAX = 3;
SCORE_MODE_TOTAL = 4;
SCORE_MODE_MIN = 5;
}
message NestedQuery {
optional string path = 1;
optional Query query = 2;
optional ScoreMode score_mode = 3;
}
message GeoBoundingBoxQuery {
optional string field_name = 1;
optional string top_left = 2;
optional string bottom_right = 3;
}
message GeoDistanceQuery {
optional string field_name = 1;
optional string center_point = 2;
optional double distance = 3;
}
message GeoPolygonQuery {
optional string field_name = 1;
repeated string points = 2;
}
message Query {
optional QueryType type = 1;
optional bytes query = 2;
}
message Collapse {
optional string field_name = 1;
}
message NestedFilter {
optional string path = 1;
optional Query filter = 2;
}
enum SortOrder {
SORT_ORDER_ASC = 0;
SORT_ORDER_DESC = 1;
}
enum SortMode {
SORT_MODE_MIN = 0;
SORT_MODE_MAX = 1;
SORT_MODE_AVG = 2;
}
message ScoreSort {
optional SortOrder order = 1;
}
message FieldSort {
optional string field_name = 1;
optional SortOrder order = 2;
optional SortMode mode = 3;
optional NestedFilter nested_filter = 4;
}
enum GeoDistanceType {
GEO_DISTANCE_ARC = 0;
GEO_DISTANCE_PLANE = 1;
}
message GeoDistanceSort {
optional string field_name = 1;
repeated string points = 2;
optional SortOrder order = 3;
optional SortMode mode = 4;
optional GeoDistanceType distance_type = 5;
optional NestedFilter nested_filter = 6;
}
message PrimaryKeySort {
optional SortOrder order = 1;
}
message Sorter {
optional FieldSort field_sort = 1;
optional GeoDistanceSort geo_distance_sort = 2;
optional ScoreSort score_sort = 3;
optional PrimaryKeySort pk_sort = 4;
}
message Sort {
repeated Sorter sorter = 1;
}
message SearchQuery {
optional int32 offset = 1;
optional int32 limit = 2;
optional Query query = 4;
optional Collapse collapse = 5;
optional Sort sort = 6;
optional bool getTotalCount = 8;
optional bytes token = 9;
}
enum ColumnReturnType {
RETURN_ALL = 1;
RETURN_SPECIFIED = 2;
RETURN_NONE = 3;
}
message ColumnsToGet {
optional ColumnReturnType return_type = 1;
repeated string column_names = 2;
}
message SearchRequest {
optional string table_name = 1;
optional string index_name = 2;
optional ColumnsToGet columns_to_get = 3;
optional bytes search_query = 4;
repeated bytes routing_values = 5;
}
/**
 * Response section
 **/
message SearchResponse {
optional int64 total_hits = 1;
repeated bytes rows = 2;
optional bool is_all_succeeded = 3;
optional bytes next_token = 6;
}
/* Create Search Index */
enum IndexOptions {
DOCS = 1;
FREQS = 2;
POSITIONS = 3;
OFFSETS = 4;
}
enum FieldType {
LONG = 1;
DOUBLE = 2;
BOOLEAN = 3;
KEYWORD = 4;
TEXT = 5;
NESTED = 6;
GEO_POINT = 7;
}
message FieldSchema {
optional string field_name = 1;
optional FieldType field_type = 2;
optional IndexOptions index_options = 3;
optional string analyzer = 4;
optional bool index = 5;
optional bool doc_values = 6;
optional bool store = 7;
repeated FieldSchema field_schemas = 8; // only for nested type
optional bool is_array = 9;
}
message IndexSchema {
repeated FieldSchema field_schemas = 1;
optional IndexSetting index_setting = 2;
optional Sort index_sort = 3;
}
message IndexSetting {
optional int32 number_of_shards = 1;
repeated string routing_fields = 2;
optional int32 routing_partition_size = 3;
}
message CreateSearchIndexRequest {
required string table_name = 1;
required string index_name = 2;
optional IndexSchema schema = 3;
}
message CreateSearchIndexResponse {
}
/* List Search Index */
message IndexInfo {
optional string table_name = 1;
optional string index_name = 2;
}
message ListSearchIndexRequest {
optional string table_name = 1;
}
message ListSearchIndexResponse {
repeated IndexInfo indices = 1;
}
/* Delete Search Index */
message DeleteSearchIndexRequest {
optional string table_name = 1;
optional string index_name = 2;
}
message DeleteSearchIndexResponse {
}
/* Describe Search Index */
enum SyncPhase {
FULL = 1;
INCR = 2;
}
message SyncStat {
optional SyncPhase sync_phase = 1;
optional int64 current_sync_timestamp = 2; // TunnelService
}
message DescribeSearchIndexRequest {
optional string table_name = 1;
optional string index_name = 2;
}
message DescribeSearchIndexResponse {
optional IndexSchema schema = 1;
optional SyncStat sync_stat = 2;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,616 @@
syntax = "proto2";
package otsprotocol;
message Error {
required string code = 1;
optional string message = 2;
}
enum PrimaryKeyType {
INTEGER = 1;
STRING = 2;
BINARY = 3;
}
enum PrimaryKeyOption {
AUTO_INCREMENT = 1;
}
message PrimaryKeySchema {
required string name = 1;
required PrimaryKeyType type = 2;
optional PrimaryKeyOption option = 3;
}
message PartitionRange {
required bytes begin = 1; // encoded as SQLVariant
required bytes end = 2; // encoded as SQLVariant
}
enum BloomFilterType {
NONE = 1;
CELL = 2;
ROW = 3;
}
message TableOptions {
optional int32 time_to_live = 1; //
optional int32 max_versions = 2; //
optional BloomFilterType bloom_filter_type = 3; //
optional int32 block_size = 4; //
optional int64 deviation_cell_version_in_sec = 5; //
}
message TableMeta {
required string table_name = 1;
repeated PrimaryKeySchema primary_key = 2;
repeated DefinedColumnSchema defined_column = 3;
repeated IndexMeta index_meta = 4;
}
/**
 * Note: failover and similar events are not reflected as table-status
 * changes.
 *
 * There can be a delay between the moment a failover causes access errors
 * and the moment the user can observe a status change. After a failover one
 * cannot say the whole table changed state — individual partitions change
 * (a PARTIAL_FAILOVER-like condition), and that partition granularity
 * cannot be expressed at table level.
 */
// TableStatus describes the lifecycle state of a table.
enum TableStatus {
    ACTIVE = 1;    // table is loaded and serving
    INACTIVE = 2;  // table was disabled via UnloadTable
    LOADING = 3;   // partitions not yet fully loaded, or being re-enabled from INACTIVE
    UNLOADING = 4; // from DeleteTable/Unload until partitions are fully unloaded
    UPDATING = 5;  // table attributes are being updated
}
enum RowExistenceExpectation {
IGNORE = 0;
EXPECT_EXIST = 1;
EXPECT_NOT_EXIST = 2;
}
message Condition {
required RowExistenceExpectation row_existence = 1;
optional bytes column_condition = 2;
}
message CapacityUnit {
optional int32 read = 1;
optional int32 write = 2;
}
message ReservedThroughputDetails {
required CapacityUnit capacity_unit = 1; //
required int64 last_increase_time = 2; //
optional int64 last_decrease_time = 3; //
}
message ReservedThroughput {
required CapacityUnit capacity_unit = 1;
}
message ConsumedCapacity {
required CapacityUnit capacity_unit = 1;
}
message StreamSpecification {
required bool enable_stream = 1;
optional int32 expiration_time = 2;
}
message StreamDetails {
required bool enable_stream = 1;
optional string stream_id = 2;
optional int32 expiration_time = 3;
optional int64 last_enable_time = 4;
}
/* ############################################# CreateTable ############################################# */
/**
* table_meta用于存储表中不可更改的schema属性ReservedThroughput和TableOptions独立出来UpdateTable的参数
* GlobalIndex和LocalIndex之后
* message CreateTableRequest {
* required TableMeta table_meta = 1;
* required ReservedThroughput reserved_throughput = 2;
* required TableOptions table_options = 3;
* repeated LocalIndex local_indexes = 4; // LocalIndex不再单独包含ReservedThroughput和TableOptions
* repeated GlobalIndex global_indexes = 5; // GlobalIndex内单独包含ReservedThroughput和TableOptions
* }
*/
message CreateTableRequest {
required TableMeta table_meta = 1;
required ReservedThroughput reserved_throughput = 2; // TableOptions内UpdateTableResponse中会返回ReservedThroughputDetailsTableOptions没有类似的返回结构
optional TableOptions table_options = 3;
repeated PartitionRange partitions = 4;
optional StreamSpecification stream_spec = 5;
repeated IndexMeta index_metas = 7;
}
message CreateTableResponse {
}
/* ######################################################################################################### */
/* ############################################# UpdateTable ############################################# */
message UpdateTableRequest {
required string table_name = 1;
optional ReservedThroughput reserved_throughput = 2;
optional TableOptions table_options = 3;
optional StreamSpecification stream_spec = 4;
}
message UpdateTableResponse {
required ReservedThroughputDetails reserved_throughput_details = 1;
required TableOptions table_options = 2;
optional StreamDetails stream_details = 3;
}
/* ######################################################################################################### */
/* ############################################# DescribeTable ############################################# */
message DescribeTableRequest {
required string table_name = 1;
}
message DescribeTableResponse {
required TableMeta table_meta = 1;
required ReservedThroughputDetails reserved_throughput_details = 2;
required TableOptions table_options = 3;
required TableStatus table_status = 4;
optional StreamDetails stream_details = 5;
repeated bytes shard_splits = 6;
repeated IndexMeta index_metas = 8;
}
/* ########################################################################################################### */
/* ############################################# ListTable ############################################# */
message ListTableRequest {
}
/**
*
* DescribeTable来获取
*/
message ListTableResponse {
repeated string table_names = 1;
}
/* ####################################################################################################### */
/* ############################################# DeleteTable ############################################# */
message DeleteTableRequest {
required string table_name = 1;
}
message DeleteTableResponse {
}
/* ######################################################################################################### */
/* ############################################# LoadTable ############################################# */
message LoadTableRequest {
required string table_name = 1;
}
message LoadTableResponse {
}
/* ######################################################################################################### */
/* ############################################# UnloadTable ############################################# */
message UnloadTableRequest {
required string table_name = 1;
}
message UnloadTableResponse {
}
/* ########################################################################################################## */
/**
* 0INT64.MAX
* 1. start_time和end_time
* 2. specific_time
*/
message TimeRange {
optional int64 start_time = 1;
optional int64 end_time = 2;
optional int64 specific_time = 3;
}
/* ############################################# GetRow ############################################# */
enum ReturnType {
RT_NONE = 0;
RT_PK = 1;
RT_AFTER_MODIFY = 2;
}
message ReturnContent {
optional ReturnType return_type = 1;
repeated string return_column_names = 2;
}
/**
* 1.
* 2.
*/
message GetRowRequest {
required string table_name = 1;
required bytes primary_key = 2; // encoded as InplaceRowChangeSet, but only has primary key
repeated string columns_to_get = 3; //
optional TimeRange time_range = 4;
optional int32 max_versions = 5;
optional bool cache_blocks = 6 [default = true]; // BlockCache
optional bytes filter = 7;
optional string start_column = 8;
optional string end_column = 9;
optional bytes token = 10;
optional string transaction_id = 11;
}
message GetRowResponse {
required ConsumedCapacity consumed = 1;
required bytes row = 2; // encoded as InplaceRowChangeSet
optional bytes next_token = 3;
}
/* #################################################################################################### */
/* ############################################# UpdateRow ############################################# */
message UpdateRowRequest {
required string table_name = 1;
required bytes row_change = 2;
required Condition condition = 3;
optional ReturnContent return_content = 4;
optional string transaction_id = 5;
}
message UpdateRowResponse {
required ConsumedCapacity consumed = 1;
optional bytes row = 2;
}
/* ####################################################################################################### */
/* ############################################# PutRow ############################################# */
/**
* timestamptimestamp
* timestamp的timestamp增强了规范性但是丧失了灵活性
*/
message PutRowRequest {
required string table_name = 1;
required bytes row = 2; // encoded as InplaceRowChangeSet
required Condition condition = 3;
optional ReturnContent return_content = 4;
optional string transaction_id = 5;
}
message PutRowResponse {
required ConsumedCapacity consumed = 1;
optional bytes row = 2;
}
/* #################################################################################################### */
/* ############################################# DeleteRow ############################################# */
/**
* OTS只支持删除该行的所有列所有版本
* 1.
*/
message DeleteRowRequest {
required string table_name = 1;
required bytes primary_key = 2; // encoded as InplaceRowChangeSet, but only has primary key
required Condition condition = 3;
optional ReturnContent return_content = 4;
optional string transaction_id = 5;
}
message DeleteRowResponse {
required ConsumedCapacity consumed = 1;
optional bytes row = 2;
}
/* ####################################################################################################### */
/* ############################################# BatchGetRow ############################################# */
/**
* HBase支持Batch操作的每行都拥有不同的查询参数OTS不支持
*/
message TableInBatchGetRowRequest {
required string table_name = 1;
repeated bytes primary_key = 2; // encoded as InplaceRowChangeSet, but only has primary key
repeated bytes token = 3;
repeated string columns_to_get = 4; //
optional TimeRange time_range = 5;
optional int32 max_versions = 6;
optional bool cache_blocks = 7 [default = true]; // BlockCache
optional bytes filter = 8;
optional string start_column = 9;
optional string end_column = 10;
}
message BatchGetRowRequest {
repeated TableInBatchGetRowRequest tables = 1;
}
message RowInBatchGetRowResponse {
required bool is_ok = 1;
optional Error error = 2;
optional ConsumedCapacity consumed = 3;
optional bytes row = 4; // encoded as InplaceRowChangeSet
optional bytes next_token = 5;
}
message TableInBatchGetRowResponse {
required string table_name = 1;
repeated RowInBatchGetRowResponse rows = 2;
}
message BatchGetRowResponse {
repeated TableInBatchGetRowResponse tables = 1;
}
/* ######################################################################################################### */
/* ############################################# BatchWriteRow ############################################# */
enum OperationType {
PUT = 1;
UPDATE = 2;
DELETE = 3;
}
message RowInBatchWriteRowRequest {
required OperationType type = 1;
required bytes row_change = 2; // encoded as InplaceRowChangeSet
required Condition condition = 3;
optional ReturnContent return_content = 4;
}
message TableInBatchWriteRowRequest {
required string table_name = 1;
repeated RowInBatchWriteRowRequest rows = 2;
}
message BatchWriteRowRequest {
repeated TableInBatchWriteRowRequest tables = 1;
optional string transaction_id = 2;
}
message RowInBatchWriteRowResponse {
required bool is_ok = 1;
optional Error error = 2;
optional ConsumedCapacity consumed = 3;
optional bytes row = 4;
}
message TableInBatchWriteRowResponse {
required string table_name = 1;
repeated RowInBatchWriteRowResponse rows = 2;
}
message BatchWriteRowResponse {
repeated TableInBatchWriteRowResponse tables = 1;
}
/* ########################################################################################################### */
/* ############################################# GetRange ############################################# */
enum Direction {
FORWARD = 0;
BACKWARD = 1;
}
/**
* HBase支持以下参数
* 1. TimeRange或指定time
* 2. Filter
*
*/
message GetRangeRequest {
required string table_name = 1;
required Direction direction = 2;
repeated string columns_to_get = 3; //
optional TimeRange time_range = 4;
optional int32 max_versions = 5;
optional int32 limit = 6;
required bytes inclusive_start_primary_key = 7; // encoded as InplaceRowChangeSet, but only has primary key
required bytes exclusive_end_primary_key = 8; // encoded as InplaceRowChangeSet, but only has primary key
optional bool cache_blocks = 9 [default = true]; // BlockCache
optional bytes filter = 10;
optional string start_column = 11;
optional string end_column = 12;
optional bytes token = 13;
optional string transaction_id = 14;
}
message GetRangeResponse {
required ConsumedCapacity consumed = 1;
required bytes rows = 2; // encoded as InplaceRowChangeSet
optional bytes next_start_primary_key = 3; // . encoded as InplaceRowChangeSet, but only has primary key
optional bytes next_token = 4;
}
/* ###################################################################################################### */
/* ############################################# Stream ############################################# */
message ListStreamRequest {
optional string table_name = 1;
}
message Stream {
required string stream_id = 1;
required string table_name = 2;
required int64 creation_time = 3;
}
message ListStreamResponse {
repeated Stream streams = 1;
}
message StreamShard {
required string shard_id = 1;
optional string parent_id = 2;
optional string parent_sibling_id = 3;
}
enum StreamStatus {
STREAM_ENABLING = 1;
STREAM_ACTIVE = 2;
}
message DescribeStreamRequest {
required string stream_id = 1;
optional string inclusive_start_shard_id = 2;
optional int32 shard_limit = 3;
}
message DescribeStreamResponse {
required string stream_id = 1;
required int32 expiration_time = 2;
required string table_name = 3;
required int64 creation_time = 4;
required StreamStatus stream_status = 5;
repeated StreamShard shards = 6;
optional string next_shard_id = 7;
}
message GetShardIteratorRequest {
required string stream_id = 1;
required string shard_id = 2;
optional int64 timestamp = 3;
optional string token = 4;
}
message GetShardIteratorResponse {
required string shard_iterator = 1;
optional string next_token = 2;
}
message GetStreamRecordRequest {
required string shard_iterator = 1;
optional int32 limit = 2;
}
enum ActionType {
PUT_ROW = 1;
UPDATE_ROW = 2;
DELETE_ROW = 3;
}
message GetStreamRecordResponse {
message StreamRecord {
required ActionType action_type = 1;
required bytes record = 2;
}
repeated StreamRecord stream_records = 1;
optional string next_shard_iterator = 2;
}
/* +++++ ComputeSplitPointsBySize +++++ */
message ComputeSplitPointsBySizeRequest {
required string table_name = 1;
required int64 split_size = 2; // in 100MB
}
message ComputeSplitPointsBySizeResponse {
required ConsumedCapacity consumed = 1;
repeated PrimaryKeySchema schema = 2;
/**
* Split points between splits, in the increasing order
*
* A split is a consecutive range of primary keys,
* whose data size is about split_size specified in the request.
* The size could be hard to be precise.
*
* A split point is an array of primary-key column w.r.t. table schema,
* which is never longer than that of table schema.
* Tailing -inf will be omitted to reduce transmission payloads.
*/
repeated bytes split_points = 3;
/**
* Locations where splits lies in.
*
* By the managed nature of TableStore, these locations are no more than hints.
* If a location is not suitable to be seen, an empty string will be placed.
*/
message SplitLocation {
required string location = 1;
required sint64 repeat = 2;
}
repeated SplitLocation locations = 4;
}
/* -------------------------------------- */
enum DefinedColumnType {
DCT_INTEGER = 1;
DCT_DOUBLE = 2;
DCT_BOOLEAN = 3;
DCT_STRING = 4;
// field 5 is reserved for date type, not supported yet
// field 6 is reserved for decimal type, not supported yet
DCT_BLOB = 7;
}
message DefinedColumnSchema {
required string name = 1;
required DefinedColumnType type = 2;
}
enum IndexUpdateMode {
IUM_ASYNC_INDEX = 0;
IUM_SYNC_INDEX = 1;
}
enum IndexType {
IT_GLOBAL_INDEX = 0;
IT_LOCAL_INDEX = 1;
}
message IndexMeta {
required string name = 1;
repeated string primary_key = 2;
repeated string defined_column = 3;
required IndexUpdateMode index_update_mode = 4;
required IndexType index_type = 5;
}
message CreateIndexRequest {
required string main_table_name = 1;
required IndexMeta index_meta = 2;
optional bool include_base_data = 3;
}
message CreateIndexResponse {
}
message DropIndexRequest {
required string main_table_name = 1;
required string index_name = 2;
}
message DropIndexResponse {
}
/* ########################################### LocalTransaction ########################################### */
message StartLocalTransactionRequest {
required string table_name = 1;
required bytes key = 2; // encoded as SQLVariant
}
message StartLocalTransactionResponse {
required string transaction_id = 1;
};
message CommitTransactionRequest {
required string transaction_id = 1;
}
message CommitTransactionResponse {
};
message AbortTransactionRequest {
required string transaction_id = 1;
}
message AbortTransactionResponse {
};
/* ######################################################################################################### */

View File

@ -0,0 +1,473 @@
package tablestore
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
)
// Plainbuffer wire-format constants used by the encoder/decoder below.
const (
	// HEADER is the 4-byte little-endian magic prefix of a plainbuffer stream.
	HEADER = 0x75

	// tag type
	TAG_ROW_PK             = 0x1
	TAG_ROW_DATA           = 0x2
	TAG_CELL               = 0x3
	TAG_CELL_NAME          = 0x4
	TAG_CELL_VALUE         = 0x5
	TAG_CELL_TYPE          = 0x6
	TAG_CELL_TIMESTAMP     = 0x7
	TAG_DELETE_ROW_MARKER  = 0x8
	TAG_ROW_CHECKSUM       = 0x9
	TAG_CELL_CHECKSUM      = 0x0A
	TAG_EXTENSION          = 0x0B
	TAG_SEQ_INFO           = 0x0C
	TAG_SEQ_INFO_EPOCH     = 0x0D
	TAG_SEQ_INFO_TS        = 0x0E
	TAG_SEQ_INFO_ROW_INDEX = 0x0F

	// cell op type
	DELETE_ALL_VERSION = 0x1
	DELETE_ONE_VERSION = 0x3
	INCREMENT          = 0x4 // stray trailing semicolon removed (gofmt)

	// variant type
	VT_INTEGER = 0x0
	VT_DOUBLE  = 0x1
	VT_BOOLEAN = 0x2
	VT_STRING  = 0x3
	//public final static byte VT_NULL = 0x6;
	VT_BLOB           = 0x7
	VT_INF_MIN        = 0x9
	VT_INF_MAX        = 0xa
	VT_AUTO_INCREMENT = 0xb

	// byte widths of the little-endian integer encodings
	LITTLE_ENDIAN_32_SIZE = 4
	LITTLE_ENDIAN_64_SIZE = 8
)
// spaceSize is the number of entries in the CRC-8 lookup table (one per
// possible byte value).
const spaceSize = 256

// crc8Table holds precomputed CRC-8 remainders (polynomial 0x07,
// i.e. x^8+x^2+x+1) for every input byte; it is filled in by init.
var crc8Table = make([]byte, spaceSize)

// init populates crc8Table so that per-byte CRC updates reduce to a single
// table lookup.
func init() {
	for i := range crc8Table {
		crc := byte(i)
		for bit := 0; bit < 8; bit++ {
			if crc&0x80 != 0 {
				crc = crc<<1 ^ 0x07
			} else {
				crc <<= 1
			}
		}
		crc8Table[i] = crc
	}
}

// crc8Byte folds one input byte into the running CRC and returns the new CRC.
func crc8Byte(crc, in byte) byte {
	return crc8Table[(crc^in)&0xff]
}
// crc8Int32 folds the four little-endian bytes of in into crc, least
// significant byte first.
func crc8Int32(crc byte, in int32) byte {
	v := uint32(in)
	for i := 0; i < 4; i++ {
		crc = crc8Byte(crc, byte(v))
		v >>= 8
	}
	return crc
}
// crc8Int64 folds the eight little-endian bytes of in into crc, least
// significant byte first.
func crc8Int64(crc byte, in int64) byte {
	v := uint64(in)
	for i := 0; i < 8; i++ {
		crc = crc8Byte(crc, byte(v))
		v >>= 8
	}
	return crc
}
// crc8Bytes folds every byte of in into crc, in order, and returns the
// resulting CRC.
func crc8Bytes(crc byte, in []byte) byte {
	for _, b := range in {
		crc = crc8Byte(crc, b)
	}
	return crc
}
// writeRawByte writes a single byte to w. The write error is ignored, as it
// is throughout this encoder.
func writeRawByte(w io.Writer, value byte) {
	w.Write([]byte{value})
}
/*func writeRawByteInt8(w io.Writer, value int) {
w.Write([]byte{byte(value)})
}*/
// writeRawLittleEndian32 writes value to w as 4 little-endian bytes.
// Write errors are ignored, matching the other raw writers in this file.
func writeRawLittleEndian32(w io.Writer, value int32) {
	// Emit all four bytes with one Write call instead of four separate
	// single-byte writes (same byte stream, less per-call overhead).
	w.Write([]byte{
		byte(value),
		byte(value >> 8),
		byte(value >> 16),
		byte(value >> 24),
	})
}
// writeRawLittleEndian64 writes value to w as 8 little-endian bytes.
// Write errors are ignored, matching the other raw writers in this file.
func writeRawLittleEndian64(w io.Writer, value int64) {
	// Emit all eight bytes with one Write call instead of eight separate
	// single-byte writes (same byte stream, less per-call overhead).
	w.Write([]byte{
		byte(value),
		byte(value >> 8),
		byte(value >> 16),
		byte(value >> 24),
		byte(value >> 32),
		byte(value >> 40),
		byte(value >> 48),
		byte(value >> 56),
	})
}
// writeDouble writes value to w as its IEEE-754 bit pattern encoded in 8
// little-endian bytes.
func writeDouble(w io.Writer, value float64) {
	writeRawLittleEndian64(w, int64(math.Float64bits(value)))
}
// writeBoolean writes value to w as a single byte: 1 for true, 0 for false.
func writeBoolean(w io.Writer, value bool) {
	b := byte(0)
	if value {
		b = 1
	}
	w.Write([]byte{b})
}
// writeBytes writes value to w unchanged; the write error is ignored, as in
// the other raw writers in this file.
func writeBytes(w io.Writer, value []byte) {
	w.Write(value)
}
// writeHeader writes the 4-byte little-endian plainbuffer HEADER magic to w.
func writeHeader(w io.Writer) {
	writeRawLittleEndian32(w, HEADER)
}
// writeTag writes a single plainbuffer tag byte (one of the TAG_* values).
func writeTag(w io.Writer, tag byte) {
	writeRawByte(w, tag)
}
// writeCellName writes a TAG_CELL_NAME tag followed by the name length as a
// little-endian int32 and then the raw name bytes.
func writeCellName(w io.Writer, name []byte) {
	writeTag(w, TAG_CELL_NAME)
	writeRawLittleEndian32(w, int32(len(name)))
	writeBytes(w, name)
}
// PlainBufferCell is one encoded cell (column) of a plainbuffer row: its
// name, optional value, and optional timestamp/type metadata.
type PlainBufferCell struct {
	cellName         []byte       // raw column-name bytes
	cellValue        *ColumnValue // column value; not written when ignoreValue is set
	cellTimestamp    int64        // cell version timestamp; written only when hasCellTimestamp
	cellType         byte         // cell op type; written only when hasCellType
	ignoreValue      bool         // when true, the value is neither written nor checksummed
	hasCellTimestamp bool         // whether cellTimestamp is present
	hasCellType      bool         // whether cellType is present
}
// writeCell serializes the cell to w in plainbuffer form: a TAG_CELL tag,
// the cell name, then the optional value, type and timestamp fields, and
// finally the cell checksum.
func (cell *PlainBufferCell) writeCell(w io.Writer) {
	writeTag(w, TAG_CELL)
	writeCellName(w, cell.cellName)
	if !cell.ignoreValue { // idiomatic form of `== false`
		cell.cellValue.writeCellValue(w)
	}
	if cell.hasCellType {
		writeTag(w, TAG_CELL_TYPE)
		writeRawByte(w, cell.cellType)
	}
	if cell.hasCellTimestamp {
		writeTag(w, TAG_CELL_TIMESTAMP)
		writeRawLittleEndian64(w, cell.cellTimestamp)
	}
	// The checksum always comes last; its field order is defined by
	// getCheckSum (name, value, timestamp, then type).
	writeTag(w, TAG_CELL_CHECKSUM)
	writeRawByte(w, cell.getCheckSum(byte(0x0)))
}
// getCheckSum folds the cell's name, value (unless ignored), timestamp and
// type into crc and returns the result. Note the order here (timestamp
// before type) differs from the write order and must not be changed — it is
// the order the server validates.
func (cell *PlainBufferCell) getCheckSum(crc byte) byte {
	crc = crc8Bytes(crc, cell.cellName)
	if !cell.ignoreValue { // idiomatic form of `== false`
		crc = cell.cellValue.getCheckSum(crc)
	}
	if cell.hasCellTimestamp {
		crc = crc8Int64(crc, cell.cellTimestamp)
	}
	if cell.hasCellType {
		crc = crc8Byte(crc, cell.cellType)
	}
	return crc
}
// PlainBufferRow is one plainbuffer row: its primary-key cells, attribute
// cells, an optional delete marker, and an optional stream-sequence
// extension.
type PlainBufferRow struct {
	primaryKey      []*PlainBufferCell  // primary-key cells, in schema order
	cells           []*PlainBufferCell  // attribute-column cells
	hasDeleteMarker bool                // row-level delete marker
	extension       *RecordSequenceInfo // optional
}
// writeRow serializes the row to w: the primary-key cells under TAG_ROW_PK,
// the attribute cells (if any) under TAG_ROW_DATA, then the row checksum.
func (row *PlainBufferRow) writeRow(w io.Writer) {
	/* pk */
	writeTag(w, TAG_ROW_PK)
	for _, pk := range row.primaryKey {
		pk.writeCell(w)
	}
	// The data section is omitted entirely when there are no attribute cells.
	if len(row.cells) > 0 {
		writeTag(w, TAG_ROW_DATA)
		for _, cell := range row.cells {
			cell.writeCell(w)
		}
	}
	writeTag(w, TAG_ROW_CHECKSUM)
	writeRawByte(w, row.getCheckSum(byte(0x0)))
}
// writeRowWithHeader writes the plainbuffer HEADER magic followed by the
// encoded row.
func (row *PlainBufferRow) writeRowWithHeader(w io.Writer) {
	writeHeader(w)
	row.writeRow(w)
}
// getCheckSum folds each primary-key cell's checksum, each attribute cell's
// checksum, and finally the delete-marker flag (1 or 0) into crc.
func (row *PlainBufferRow) getCheckSum(crc byte) byte {
	for _, pk := range row.primaryKey {
		crc = crc8Byte(crc, pk.getCheckSum(byte(0x0)))
	}
	for _, c := range row.cells {
		crc = crc8Byte(crc, c.getCheckSum(byte(0x0)))
	}
	marker := byte(0x0)
	if row.hasDeleteMarker {
		marker = byte(0x1)
	}
	return crc8Byte(crc, marker)
}
// readRawByte returns the next byte from r, panicking with errUnexpectIoEnd
// when the reader is exhausted. Decode panics are recovered in
// readRowsWithHeader and turned into error returns.
func readRawByte(r *bytes.Reader) byte {
	if r.Len() == 0 {
		panic(errUnexpectIoEnd)
	}
	// Length was checked above, so ReadByte cannot fail here.
	b, _ := r.ReadByte()
	return b
}
// readTag reads the next tag byte and returns it widened to int for
// comparison against the TAG_* constants.
func readTag(r *bytes.Reader) int {
	return int(readRawByte(r))
}
// readRawLittleEndian64 reads 8 little-endian bytes from r as an int64,
// panicking with errUnexpectIoEnd when fewer than 8 bytes remain.
func readRawLittleEndian64(r *bytes.Reader) int64 {
	if r.Len() < 8 {
		panic(errUnexpectIoEnd)
	}
	var v int64
	// Length was checked above, so binary.Read cannot fail here.
	binary.Read(r, binary.LittleEndian, &v)
	return v
}
// readRawLittleEndian32 reads 4 little-endian bytes from r as an int32,
// panicking with errUnexpectIoEnd when fewer than 4 bytes remain.
func readRawLittleEndian32(r *bytes.Reader) int32 {
	if r.Len() < 4 {
		panic(errUnexpectIoEnd)
	}
	var v int32
	// Length was checked above, so binary.Read cannot fail here.
	binary.Read(r, binary.LittleEndian, &v)
	return v
}
// readBoolean reads one byte and reports whether it is non-zero.
func readBoolean(r *bytes.Reader) bool {
	b := readRawByte(r)
	return b != 0
}
// readBytes reads exactly size bytes from r, panicking with
// errUnexpectIoEnd when not enough data remains.
func readBytes(r *bytes.Reader, size int32) []byte {
	if int32(r.Len()) < size {
		panic(errUnexpectIoEnd)
	}
	v := make([]byte, size)
	// Length was checked above, so this single Read fills v completely.
	r.Read(v)
	return v
}
// readCellValue decodes a cell-value payload from r into a ColumnValue.
// The leading 4-byte total-length prefix is read and discarded; the variant
// type byte then selects how the remainder is decoded.
// NOTE(review): an unknown variant type leaves the zero ColumnValue
// unchanged, matching the original behavior.
func readCellValue(r *bytes.Reader) *ColumnValue {
	value := new(ColumnValue)
	readRawLittleEndian32(r) // value length prefix; not needed for decoding
	tp := readRawByte(r)
	switch tp {
	case VT_INTEGER:
		value.Type = ColumnType_INTEGER
		value.Value = readRawLittleEndian64(r)
	case VT_DOUBLE:
		value.Type = ColumnType_DOUBLE
		value.Value = math.Float64frombits(uint64(readRawLittleEndian64(r)))
	case VT_BOOLEAN:
		value.Type = ColumnType_BOOLEAN
		value.Value = readBoolean(r)
	case VT_STRING:
		value.Type = ColumnType_STRING
		value.Value = string(readBytes(r, readRawLittleEndian32(r)))
	case VT_BLOB:
		value.Type = ColumnType_BINARY
		// readBytes already returns []byte; the redundant []byte(...)
		// conversion was removed.
		value.Value = readBytes(r, readRawLittleEndian32(r))
	}
	return value
}
// readCell decodes one cell. The name tag is mandatory; value, type and
// timestamp are optional but must appear in that order; a TAG_CELL_CHECKSUM
// terminates the cell (the checksum byte is consumed, not verified).
func readCell(r *bytes.Reader) *PlainBufferCell {
	cell := new(PlainBufferCell)
	tag := readTag(r)
	if tag != TAG_CELL_NAME {
		panic(errTag)
	}
	cell.cellName = readBytes(r, readRawLittleEndian32(r))
	tag = readTag(r)
	if tag == TAG_CELL_VALUE {
		cell.cellValue = readCellValue(r)
		tag = readTag(r)
	}
	if tag == TAG_CELL_TYPE {
		// The cell-type byte is consumed but not retained on decode.
		readRawByte(r)
		tag = readTag(r)
	}
	if tag == TAG_CELL_TIMESTAMP {
		// NOTE(review): hasCellTimestamp is not set here — confirm that
		// decode consumers do not rely on it.
		cell.cellTimestamp = readRawLittleEndian64(r)
		tag = readTag(r)
	}
	if tag == TAG_CELL_CHECKSUM {
		readRawByte(r)
	} else {
		panic(errNoChecksum)
	}
	return cell
}
// readRowPk reads consecutive TAG_CELL entries forming the primary key.
// It stops at the first non-cell tag and rewinds the reader by one byte so
// the caller can re-read that tag.
func readRowPk(r *bytes.Reader) []*PlainBufferCell {
	primaryKeyColumns := make([]*PlainBufferCell, 0, 4)
	tag := readTag(r)
	for tag == TAG_CELL {
		primaryKeyColumns = append(primaryKeyColumns, readCell(r))
		tag = readTag(r)
	}
	// Push back the terminating tag (named constant instead of magic 1).
	r.Seek(-1, io.SeekCurrent)
	return primaryKeyColumns
}
// readRowData reads consecutive TAG_CELL entries forming the row's
// attribute columns. It stops at the first non-cell tag and rewinds the
// reader by one byte so the caller can re-read that tag.
func readRowData(r *bytes.Reader) []*PlainBufferCell {
	columns := make([]*PlainBufferCell, 0, 10)
	tag := readTag(r)
	for tag == TAG_CELL {
		columns = append(columns, readCell(r))
		tag = readTag(r)
	}
	// Push back the terminating tag (named constant instead of magic 1).
	r.Seek(-1, io.SeekCurrent)
	return columns
}
// readRow decodes one row: optional primary-key, data, delete-marker and
// extension sections, each introduced by its tag and expected in this fixed
// order, terminated by a mandatory TAG_ROW_CHECKSUM. The checksum byte is
// consumed but not verified here.
func readRow(r *bytes.Reader) *PlainBufferRow {
	row := new(PlainBufferRow)
	tag := readTag(r)
	if tag == TAG_ROW_PK {
		row.primaryKey = readRowPk(r)
		tag = readTag(r)
	}
	if tag == TAG_ROW_DATA {
		row.cells = readRowData(r)
		tag = readTag(r)
	}
	if tag == TAG_DELETE_ROW_MARKER {
		row.hasDeleteMarker = true
		tag = readTag(r)
	}
	if tag == TAG_EXTENSION {
		row.extension = readRowExtension(r)
		tag = readTag(r)
	}
	if tag == TAG_ROW_CHECKSUM {
		readRawByte(r)
	} else {
		panic(errNoChecksum)
	}
	return row
}
// readRowsWithHeader validates the plainbuffer header in r and decodes all
// remaining rows. The low-level readers signal malformed input by
// panicking; the deferred recover converts panics that carry an error back
// into a normal error return.
func readRowsWithHeader(r *bytes.Reader) (rows []*PlainBufferRow, err error) {
	defer func() {
		// Single comma-ok assertion instead of the original double
		// assertion. Non-error panic values are deliberately swallowed,
		// matching the original behavior.
		if rec := recover(); rec != nil {
			if e, ok := rec.(error); ok {
				err = e
			}
		}
	}()
	// TODO: panic
	if readRawLittleEndian32(r) != HEADER {
		return nil, fmt.Errorf("Invalid header from plain buffer")
	}
	rows = make([]*PlainBufferRow, 0, 10)
	for r.Len() > 0 {
		rows = append(rows, readRow(r))
	}
	return rows, nil
}
// readRowExtension decodes the TAG_EXTENSION payload: a sequence-info block
// carrying epoch, timestamp and row index for stream records. The two
// length prefixes are read and discarded; tags must appear in the fixed
// order SEQ_INFO, SEQ_INFO_EPOCH, SEQ_INFO_TS, SEQ_INFO_ROW_INDEX.
func readRowExtension(r *bytes.Reader) *RecordSequenceInfo {
	readRawLittleEndian32(r) // extension block length; not needed
	tag := readTag(r)
	if tag != TAG_SEQ_INFO {
		panic(errTag)
	}
	readRawLittleEndian32(r) // sequence-info length; not needed
	tag = readTag(r)
	if tag != TAG_SEQ_INFO_EPOCH {
		panic(errTag)
	}
	epoch := readRawLittleEndian32(r)
	tag = readTag(r)
	if tag != TAG_SEQ_INFO_TS {
		panic(errTag)
	}
	ts := readRawLittleEndian64(r)
	tag = readTag(r)
	if tag != TAG_SEQ_INFO_ROW_INDEX {
		panic(errTag)
	}
	rowIndex := readRawLittleEndian32(r)
	ext := RecordSequenceInfo{}
	ext.Epoch = epoch
	ext.Timestamp = ts
	ext.RowIndex = rowIndex
	return &ext
}

View File

@ -0,0 +1,14 @@
package search
import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
// Collapse configures result collapsing (deduplication) on FieldName.
type Collapse struct {
	FieldName string
}

// ProtoBuffer converts the collapse setting to its protobuf form.
func (c *Collapse) ProtoBuffer() (*otsprotocol.Collapse, error) {
	return &otsprotocol.Collapse{FieldName: &c.FieldName}, nil
}

View File

@ -0,0 +1,85 @@
package search
import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
// QueryType identifies the concrete query implementation carried inside an
// otsprotocol.Query envelope; ToPB maps it to the wire enum.
type QueryType int

const (
	QueryType_None               QueryType = 0
	QueryType_MatchQuery         QueryType = 1
	QueryType_MatchPhraseQuery   QueryType = 2
	QueryType_TermQuery          QueryType = 3
	QueryType_RangeQuery         QueryType = 4
	QueryType_PrefixQuery        QueryType = 5
	QueryType_BoolQuery          QueryType = 6
	QueryType_ConstScoreQuery    QueryType = 7
	QueryType_FunctionScoreQuery QueryType = 8
	QueryType_NestedQuery        QueryType = 9
	QueryType_WildcardQuery      QueryType = 10
	QueryType_MatchAllQuery      QueryType = 11
	QueryType_GeoBoundingBoxQuery QueryType = 12
	QueryType_GeoDistanceQuery   QueryType = 13
	QueryType_GeoPolygonQuery    QueryType = 14
	QueryType_TermsQuery         QueryType = 15
)

// Enum returns a pointer to a copy of q, mirroring the protobuf Enum helper
// convention.
func (q QueryType) Enum() *QueryType {
	newQuery := q
	return &newQuery
}

// ToPB maps the QueryType to its otsprotocol enum value. QueryType_None
// maps to nil; any value outside the defined range panics.
func (q QueryType) ToPB() *otsprotocol.QueryType {
	switch q {
	case QueryType_None:
		return nil
	case QueryType_MatchQuery:
		return otsprotocol.QueryType_MATCH_QUERY.Enum()
	case QueryType_MatchPhraseQuery:
		return otsprotocol.QueryType_MATCH_PHRASE_QUERY.Enum()
	case QueryType_TermQuery:
		return otsprotocol.QueryType_TERM_QUERY.Enum()
	case QueryType_RangeQuery:
		return otsprotocol.QueryType_RANGE_QUERY.Enum()
	case QueryType_PrefixQuery:
		return otsprotocol.QueryType_PREFIX_QUERY.Enum()
	case QueryType_BoolQuery:
		return otsprotocol.QueryType_BOOL_QUERY.Enum()
	case QueryType_ConstScoreQuery:
		return otsprotocol.QueryType_CONST_SCORE_QUERY.Enum()
	case QueryType_FunctionScoreQuery:
		return otsprotocol.QueryType_FUNCTION_SCORE_QUERY.Enum()
	case QueryType_NestedQuery:
		return otsprotocol.QueryType_NESTED_QUERY.Enum()
	case QueryType_WildcardQuery:
		return otsprotocol.QueryType_WILDCARD_QUERY.Enum()
	case QueryType_MatchAllQuery:
		return otsprotocol.QueryType_MATCH_ALL_QUERY.Enum()
	case QueryType_GeoBoundingBoxQuery:
		return otsprotocol.QueryType_GEO_BOUNDING_BOX_QUERY.Enum()
	case QueryType_GeoDistanceQuery:
		return otsprotocol.QueryType_GEO_DISTANCE_QUERY.Enum()
	case QueryType_GeoPolygonQuery:
		return otsprotocol.QueryType_GEO_POLYGON_QUERY.Enum()
	case QueryType_TermsQuery:
		return otsprotocol.QueryType_TERMS_QUERY.Enum()
	default:
		panic("unexpected")
	}
}

// Query is a serializable search query component: it reports its type tag,
// serializes its body, and wraps both into a protobuf Query envelope.
type Query interface {
	Type() QueryType
	Serialize() ([]byte, error)
	ProtoBuffer() (*otsprotocol.Query, error)
}

// BuildPBForQuery wraps q's serialized body and type tag into an
// otsprotocol.Query envelope; shared by all Query implementations.
func BuildPBForQuery(q Query) (*otsprotocol.Query, error) {
	query := &otsprotocol.Query{}
	query.Type = q.Type().ToPB()
	data, err := q.Serialize()
	if err != nil {
		return nil, err
	}
	query.Query = data
	return query, nil
}

View File

@ -0,0 +1,75 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// BoolQuery combines sub-queries with boolean semantics: documents must
// match all MustQueries and FilterQueries, none of the MustNotQueries, and
// at least MinimumShouldMatch of the ShouldQueries (when set).
type BoolQuery struct {
	MustQueries        []Query
	MustNotQueries     []Query
	FilterQueries      []Query
	ShouldQueries      []Query
	MinimumShouldMatch *int32
}

// Type reports QueryType_BoolQuery.
func (q *BoolQuery) Type() QueryType {
	return QueryType_BoolQuery
}

// queriesToPB converts a list of sub-queries to their protobuf form. It
// returns an empty (non-nil) slice for an empty input, preserving the
// original behavior of assigning an empty slice rather than nil.
func queriesToPB(qs []Query) ([]*otsprotocol.Query, error) {
	pbs := make([]*otsprotocol.Query, 0)
	for _, sub := range qs {
		pb, err := sub.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pbs = append(pbs, pb)
	}
	return pbs, nil
}

// Serialize marshals the query to protobuf bytes. The four near-identical
// conversion loops of the original are folded into queriesToPB; the
// non-gofmt parenthesized condition is also cleaned up.
func (q *BoolQuery) Serialize() ([]byte, error) {
	query := &otsprotocol.BoolQuery{}
	var err error
	if q.MustQueries != nil {
		if query.MustQueries, err = queriesToPB(q.MustQueries); err != nil {
			return nil, err
		}
	}
	if q.MustNotQueries != nil {
		if query.MustNotQueries, err = queriesToPB(q.MustNotQueries); err != nil {
			return nil, err
		}
	}
	if q.FilterQueries != nil {
		if query.FilterQueries, err = queriesToPB(q.FilterQueries); err != nil {
			return nil, err
		}
	}
	if q.ShouldQueries != nil {
		if query.ShouldQueries, err = queriesToPB(q.ShouldQueries); err != nil {
			return nil, err
		}
	}
	if q.MinimumShouldMatch != nil {
		query.MinimumShouldMatch = q.MinimumShouldMatch
	}
	return proto.Marshal(query)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *BoolQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,29 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// ConstScoreQuery wraps Filter so every matching document receives a
// constant relevance score.
type ConstScoreQuery struct {
	Filter Query
}

// Type reports QueryType_ConstScoreQuery.
func (q *ConstScoreQuery) Type() QueryType {
	return QueryType_ConstScoreQuery
}

// Serialize marshals the query to protobuf bytes.
func (q *ConstScoreQuery) Serialize() ([]byte, error) {
	pbFilter, err := q.Filter.ProtoBuffer()
	if err != nil {
		return nil, err
	}
	pb := &otsprotocol.ConstScoreQuery{Filter: pbFilter}
	return proto.Marshal(pb)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *ConstScoreQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,49 @@
package search
import (
"errors"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// FieldValueFactor scores documents by the value of the named field.
type FieldValueFactor struct {
	FieldName string
}

// ProtoBuffer converts the factor to its protobuf form.
func (f *FieldValueFactor) ProtoBuffer() (*otsprotocol.FieldValueFactor, error) {
	return &otsprotocol.FieldValueFactor{FieldName: &f.FieldName}, nil
}

// FunctionScoreQuery rescores the documents matched by Query using
// FieldValueFactor; both fields are mandatory.
type FunctionScoreQuery struct {
	Query            Query
	FieldValueFactor *FieldValueFactor
}

// Type reports QueryType_FunctionScoreQuery.
func (q *FunctionScoreQuery) Type() QueryType {
	return QueryType_FunctionScoreQuery
}

// Serialize marshals the query to protobuf bytes; it fails when either
// Query or FieldValueFactor is unset.
func (q *FunctionScoreQuery) Serialize() ([]byte, error) {
	if q.Query == nil || q.FieldValueFactor == nil {
		return nil, errors.New("FunctionScoreQuery: Query or FieldValueFactor is nil")
	}
	pbQuery, err := q.Query.ProtoBuffer()
	if err != nil {
		return nil, err
	}
	pbFactor, err := q.FieldValueFactor.ProtoBuffer()
	if err != nil {
		return nil, err
	}
	pb := &otsprotocol.FunctionScoreQuery{
		Query:            pbQuery,
		FieldValueFactor: pbFactor,
	}
	return proto.Marshal(pb)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *FunctionScoreQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,29 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// GeoBoundingBoxQuery matches documents whose geo-point field falls inside
// the rectangle spanned by TopLeft and BottomRight.
type GeoBoundingBoxQuery struct {
	FieldName   string
	TopLeft     string
	BottomRight string
}

// Type reports QueryType_GeoBoundingBoxQuery.
func (q *GeoBoundingBoxQuery) Type() QueryType {
	return QueryType_GeoBoundingBoxQuery
}

// Serialize marshals the query to protobuf bytes.
func (q *GeoBoundingBoxQuery) Serialize() ([]byte, error) {
	pb := &otsprotocol.GeoBoundingBoxQuery{
		FieldName:   &q.FieldName,
		TopLeft:     &q.TopLeft,
		BottomRight: &q.BottomRight,
	}
	return proto.Marshal(pb)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *GeoBoundingBoxQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,29 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// GeoDistanceQuery matches documents whose geo-point field lies within
// DistanceInMeter of CenterPoint.
type GeoDistanceQuery struct {
	FieldName       string
	CenterPoint     string
	DistanceInMeter float64
}

// Type reports QueryType_GeoDistanceQuery.
func (q *GeoDistanceQuery) Type() QueryType {
	return QueryType_GeoDistanceQuery
}

// Serialize marshals the query to protobuf bytes.
func (q *GeoDistanceQuery) Serialize() ([]byte, error) {
	pb := &otsprotocol.GeoDistanceQuery{
		FieldName:   &q.FieldName,
		CenterPoint: &q.CenterPoint,
		Distance:    &q.DistanceInMeter,
	}
	return proto.Marshal(pb)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *GeoDistanceQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,27 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// GeoPolygonQuery matches documents whose geo-point field lies inside the
// polygon described by Points.
type GeoPolygonQuery struct {
	FieldName string
	Points    []string
}

// Type reports QueryType_GeoPolygonQuery.
func (q *GeoPolygonQuery) Type() QueryType {
	return QueryType_GeoPolygonQuery
}

// Serialize marshals the query to protobuf bytes.
func (q *GeoPolygonQuery) Serialize() ([]byte, error) {
	pb := &otsprotocol.GeoPolygonQuery{
		FieldName: &q.FieldName,
		Points:    q.Points,
	}
	return proto.Marshal(pb)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *GeoPolygonQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,68 @@
package search
import (
"errors"
"fmt"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// QueryOperator selects how the analyzed terms of a MatchQuery combine.
type QueryOperator int8

const (
	QueryOperator_OR  QueryOperator = 0
	QueryOperator_AND QueryOperator = 1
)

// Enum returns a pointer to a copy of x.
func (x QueryOperator) Enum() *QueryOperator {
	v := x
	return &v
}

// ProtoBuffer maps the operator to its protobuf enum value; a nil receiver
// or an undefined value yields an error.
func (o *QueryOperator) ProtoBuffer() (*otsprotocol.QueryOperator, error) {
	if o == nil {
		return nil, errors.New("query operator is nil")
	}
	switch *o {
	case QueryOperator_OR:
		return otsprotocol.QueryOperator_OR.Enum(), nil
	case QueryOperator_AND:
		return otsprotocol.QueryOperator_AND.Enum(), nil
	default:
		return nil, errors.New("unknown query operator: " + fmt.Sprintf("%#v", *o))
	}
}

// MatchQuery matches FieldName against Text; MinimumShouldMatch and
// Operator are optional and only sent when set.
type MatchQuery struct {
	FieldName          string
	Text               string
	MinimumShouldMatch *int32
	Operator           *QueryOperator
}

// Type reports QueryType_MatchQuery.
func (q *MatchQuery) Type() QueryType {
	return QueryType_MatchQuery
}

// Serialize marshals the query to protobuf bytes.
func (q *MatchQuery) Serialize() ([]byte, error) {
	pb := &otsprotocol.MatchQuery{
		FieldName: &q.FieldName,
		Text:      &q.Text,
	}
	if q.MinimumShouldMatch != nil {
		pb.MinimumShouldMatch = q.MinimumShouldMatch
	}
	if q.Operator != nil {
		op, err := q.Operator.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pb.Operator = op
	}
	return proto.Marshal(pb)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *MatchQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,27 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// MatchPhraseQuery matches FieldName against Text as a whole phrase
// (serialized to otsprotocol.MatchPhraseQuery).
type MatchPhraseQuery struct {
	FieldName string
	Text      string
}

// Type reports QueryType_MatchPhraseQuery.
func (q *MatchPhraseQuery) Type() QueryType {
	return QueryType_MatchPhraseQuery
}

// Serialize marshals the query to protobuf bytes.
func (q *MatchPhraseQuery) Serialize() ([]byte, error) {
	pb := &otsprotocol.MatchPhraseQuery{
		FieldName: &q.FieldName,
		Text:      &q.Text,
	}
	return proto.Marshal(pb)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *MatchPhraseQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,23 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// MatchAllQuery matches every document in the index.
type MatchAllQuery struct {
}

// Type reports QueryType_MatchAllQuery.
func (q *MatchAllQuery) Type() QueryType {
	return QueryType_MatchAllQuery
}

// Serialize marshals an empty MatchAllQuery message.
func (q *MatchAllQuery) Serialize() ([]byte, error) {
	return proto.Marshal(&otsprotocol.MatchAllQuery{})
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *MatchAllQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,54 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// ScoreModeType controls how the scores of matching nested documents are
// combined into the parent document's score.
type ScoreModeType int

const (
	ScoreMode_None  ScoreModeType = 1
	ScoreMode_Avg   ScoreModeType = 2
	ScoreMode_Max   ScoreModeType = 3
	ScoreMode_Total ScoreModeType = 4
	ScoreMode_Min   ScoreModeType = 5
)

// NestedQuery runs Query against nested documents stored under Path.
type NestedQuery struct {
	Path      string
	Query     Query
	ScoreMode ScoreModeType
}

// Type reports QueryType_NestedQuery.
func (q *NestedQuery) Type() QueryType {
	return QueryType_NestedQuery
}

// Serialize marshals the query to protobuf bytes. An unrecognized ScoreMode
// (including the zero value) leaves the protobuf field unset.
func (q *NestedQuery) Serialize() ([]byte, error) {
	inner, err := q.Query.ProtoBuffer()
	if err != nil {
		return nil, err
	}
	pb := &otsprotocol.NestedQuery{
		Query: inner,
		Path:  &q.Path,
	}
	switch q.ScoreMode {
	case ScoreMode_None:
		pb.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_NONE.Enum()
	case ScoreMode_Avg:
		pb.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_AVG.Enum()
	case ScoreMode_Max:
		pb.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_MAX.Enum()
	case ScoreMode_Min:
		pb.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_MIN.Enum()
	case ScoreMode_Total:
		pb.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_TOTAL.Enum()
	}
	return proto.Marshal(pb)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *NestedQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,27 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// PrefixQuery matches documents whose FieldName value starts with Prefix.
type PrefixQuery struct {
	FieldName string
	Prefix    string
}

// Type reports QueryType_PrefixQuery.
func (q *PrefixQuery) Type() QueryType {
	return QueryType_PrefixQuery
}

// Serialize marshals the query to protobuf bytes.
func (q *PrefixQuery) Serialize() ([]byte, error) {
	pb := &otsprotocol.PrefixQuery{
		FieldName: &q.FieldName,
		Prefix:    &q.Prefix,
	}
	return proto.Marshal(pb)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *PrefixQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,75 @@
package search
import (
"errors"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// RangeQuery matches documents whose FieldName value lies between From and
// To; IncludeLower/IncludeUpper control bound inclusiveness.
type RangeQuery struct {
	FieldName    string
	From         interface{}
	To           interface{}
	IncludeLower bool
	IncludeUpper bool
}

// GT sets an exclusive lower bound.
func (q *RangeQuery) GT(value interface{}) {
	q.from(value, false)
}

// GTE sets an inclusive lower bound.
func (q *RangeQuery) GTE(value interface{}) {
	q.from(value, true)
}

// LT sets an exclusive upper bound.
func (q *RangeQuery) LT(value interface{}) {
	q.to(value, false)
}

// LTE sets an inclusive upper bound.
func (q *RangeQuery) LTE(value interface{}) {
	q.to(value, true)
}

func (q *RangeQuery) from(value interface{}, inclusive bool) {
	q.From = value
	q.IncludeLower = inclusive
}

func (q *RangeQuery) to(value interface{}, inclusive bool) {
	q.To = value
	q.IncludeUpper = inclusive
}

// Type reports QueryType_RangeQuery.
func (q *RangeQuery) Type() QueryType {
	return QueryType_RangeQuery
}

// Serialize marshals the query to protobuf bytes; FieldName is mandatory,
// and unset bounds are omitted.
func (q *RangeQuery) Serialize() ([]byte, error) {
	if q.FieldName == "" {
		return nil, errors.New("RangeQuery: fieldName not set.")
	}
	pb := &otsprotocol.RangeQuery{
		FieldName:    &q.FieldName,
		IncludeLower: &q.IncludeLower,
		IncludeUpper: &q.IncludeUpper,
	}
	if q.From != nil {
		v, err := ToVariantValue(q.From)
		if err != nil {
			return nil, err
		}
		pb.RangeFrom = []byte(v)
	}
	if q.To != nil {
		v, err := ToVariantValue(q.To)
		if err != nil {
			return nil, err
		}
		pb.RangeTo = []byte(v)
	}
	return proto.Marshal(pb)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *RangeQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,31 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// TermQuery matches documents whose FieldName value equals Term exactly.
type TermQuery struct {
	FieldName string
	Term      interface{}
}

// Type reports QueryType_TermQuery.
func (q *TermQuery) Type() QueryType {
	return QueryType_TermQuery
}

// Serialize marshals the query to protobuf bytes; Term is encoded as a
// variant value.
func (q *TermQuery) Serialize() ([]byte, error) {
	v, err := ToVariantValue(q.Term)
	if err != nil {
		return nil, err
	}
	pb := &otsprotocol.TermQuery{
		FieldName: &q.FieldName,
		Term:      []byte(v),
	}
	return proto.Marshal(pb)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *TermQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,35 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// TermsQuery matches documents whose FieldName value equals any of Terms.
type TermsQuery struct {
	FieldName string
	Terms     []interface{}
}

// Type reports QueryType_TermsQuery.
func (q *TermsQuery) Type() QueryType {
	return QueryType_TermsQuery
}

// Serialize marshals the query to protobuf bytes; each term is encoded as a
// variant value.
func (q *TermsQuery) Serialize() ([]byte, error) {
	pb := &otsprotocol.TermsQuery{
		FieldName: &q.FieldName,
		Terms:     make([][]byte, 0),
	}
	for _, term := range q.Terms {
		v, err := ToVariantValue(term)
		if err != nil {
			return nil, err
		}
		pb.Terms = append(pb.Terms, []byte(v))
	}
	return proto.Marshal(pb)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *TermsQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,27 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// WildcardQuery matches documents whose FieldName value matches the
// wildcard pattern Value.
type WildcardQuery struct {
	FieldName string
	Value     string
}

// Type reports QueryType_WildcardQuery.
func (q *WildcardQuery) Type() QueryType {
	return QueryType_WildcardQuery
}

// Serialize marshals the query to protobuf bytes.
func (q *WildcardQuery) Serialize() ([]byte, error) {
	pb := &otsprotocol.WildcardQuery{
		FieldName: &q.FieldName,
		Value:     &q.Value,
	}
	return proto.Marshal(pb)
}

// ProtoBuffer wraps the serialized query in an otsprotocol.Query envelope.
func (q *WildcardQuery) ProtoBuffer() (*otsprotocol.Query, error) {
	return BuildPBForQuery(q)
}

View File

@ -0,0 +1,101 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// SearchQuery is the serializable query portion of a SearchRequest.
type SearchQuery interface {
	Serialize() ([]byte, error)
}

// searchQuery is the default SearchQuery implementation, built via
// NewSearchQuery and its chainable setters.
type searchQuery struct {
	Offset        int32
	Limit         int32
	Query         Query
	Collapse      *Collapse
	Sort          *Sort
	GetTotalCount bool
	Token         []byte
}

// NewSearchQuery returns a query with offset and limit unset (-1 means
// "do not send") and total-count disabled.
func NewSearchQuery() *searchQuery {
	return &searchQuery{
		Offset:        -1,
		Limit:         -1,
		GetTotalCount: false,
	}
}

// SetOffset sets the result offset and returns the query for chaining.
func (s *searchQuery) SetOffset(offset int32) *searchQuery {
	s.Offset = offset
	return s
}

// SetLimit sets the maximum number of results.
func (s *searchQuery) SetLimit(limit int32) *searchQuery {
	s.Limit = limit
	return s
}

// SetQuery sets the query to execute.
func (s *searchQuery) SetQuery(query Query) *searchQuery {
	s.Query = query
	return s
}

// SetCollapse sets the result-collapsing configuration.
func (s *searchQuery) SetCollapse(collapse *Collapse) *searchQuery {
	s.Collapse = collapse
	return s
}

// SetSort sets the result ordering.
func (s *searchQuery) SetSort(sort *Sort) *searchQuery {
	s.Sort = sort
	return s
}

// SetGetTotalCount toggles returning the total hit count.
func (s *searchQuery) SetGetTotalCount(getTotalCount bool) *searchQuery {
	s.GetTotalCount = getTotalCount
	return s
}

// SetToken installs a continuation token and clears Sort, matching the
// original behavior (the token already fixes the ordering).
func (s *searchQuery) SetToken(token []byte) *searchQuery {
	s.Token = token
	s.Sort = nil
	return s
}

// Serialize marshals the query to protobuf bytes; unset optional parts are
// omitted from the message.
func (s *searchQuery) Serialize() ([]byte, error) {
	pb := &otsprotocol.SearchQuery{}
	if s.Offset >= 0 {
		pb.Offset = &s.Offset
	}
	if s.Limit >= 0 {
		pb.Limit = &s.Limit
	}
	if s.Query != nil {
		q, err := s.Query.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pb.Query = q
	}
	if s.Collapse != nil {
		c, err := s.Collapse.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pb.Collapse = c
	}
	if s.Sort != nil {
		srt, err := s.Sort.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pb.Sort = srt
	}
	pb.GetTotalCount = &s.GetTotalCount
	if len(s.Token) > 0 {
		pb.Token = s.Token
	}
	return proto.Marshal(pb)
}

View File

@ -0,0 +1,27 @@
package search
import (
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
)
// Sorter converts one sort criterion to its protobuf form.
type Sorter interface {
	ProtoBuffer() (*otsprotocol.Sorter, error)
}

// Sort is an ordered list of sort criteria.
type Sort struct {
	Sorters []Sorter
}

// ProtoBuffer converts the sort list to its protobuf form, failing on the
// first sorter that cannot be converted.
func (s *Sort) ProtoBuffer() (*otsprotocol.Sort, error) {
	pbSorters := make([]*otsprotocol.Sorter, 0)
	for _, sorter := range s.Sorters {
		pb, err := sorter.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pbSorters = append(pbSorters, pb)
	}
	return &otsprotocol.Sort{Sorter: pbSorters}, nil
}

View File

@ -0,0 +1,67 @@
package search
import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
// NestedFilter restricts a sort to nested documents under Path that match
// Filter.
type NestedFilter struct {
	Path   string
	Filter Query
}

// ProtoBuffer converts the filter to its protobuf form.
func (f *NestedFilter) ProtoBuffer() (*otsprotocol.NestedFilter, error) {
	pbQuery, err := f.Filter.ProtoBuffer()
	if err != nil {
		return nil, err
	}
	pb := &otsprotocol.NestedFilter{
		Path:   &f.Path,
		Filter: pbQuery,
	}
	return pb, nil
}

// FieldSort orders results by the value of FieldName; Order, Mode and
// NestedFilter are optional.
type FieldSort struct {
	FieldName    string
	Order        *SortOrder
	Mode         *SortMode
	NestedFilter *NestedFilter
}

// NewFieldSort returns a FieldSort on fieldName with the given order.
func NewFieldSort(fieldName string, order SortOrder) *FieldSort {
	return &FieldSort{
		FieldName: fieldName,
		Order:     order.Enum(),
	}
}

// ProtoBuffer converts the sorter to its protobuf form; optional parts are
// only set when present.
func (s *FieldSort) ProtoBuffer() (*otsprotocol.Sorter, error) {
	pb := &otsprotocol.FieldSort{FieldName: &s.FieldName}
	if s.Order != nil {
		order, err := s.Order.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pb.Order = order
	}
	if s.Mode != nil {
		mode, err := s.Mode.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		if mode != nil {
			pb.Mode = mode
		}
	}
	if s.NestedFilter != nil {
		filter, err := s.NestedFilter.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pb.NestedFilter = filter
	}
	return &otsprotocol.Sorter{FieldSort: pb}, nil
}

View File

@ -0,0 +1,77 @@
package search
import (
"errors"
"fmt"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
)
// GeoDistanceType selects the algorithm used to compute geo distances.
type GeoDistanceType int8

const (
	GeoDistanceType_ARC GeoDistanceType = 0
	// BUG FIX: this constant was also 0, making PLANE indistinguishable
	// from ARC, so ProtoBuffer could never return GEO_DISTANCE_PLANE.
	GeoDistanceType_PLANE GeoDistanceType = 1
)

// ProtoBuffer maps the type to its protobuf enum value; a nil receiver or
// an undefined value yields an error.
func (t *GeoDistanceType) ProtoBuffer() (*otsprotocol.GeoDistanceType, error) {
	if t == nil {
		return nil, errors.New("type is nil")
	}
	switch *t {
	case GeoDistanceType_ARC:
		return otsprotocol.GeoDistanceType_GEO_DISTANCE_ARC.Enum(), nil
	case GeoDistanceType_PLANE:
		return otsprotocol.GeoDistanceType_GEO_DISTANCE_PLANE.Enum(), nil
	default:
		return nil, errors.New("unknown distance type: " + fmt.Sprintf("%#v", *t))
	}
}
// GeoDistanceSort orders results by the distance between the geo-point
// field FieldName and the reference Points. Order, Mode, GeoDistanceType
// and NestedFilter are optional.
type GeoDistanceSort struct {
	FieldName       string
	Points          []string
	Order           *SortOrder
	Mode            *SortMode
	GeoDistanceType *GeoDistanceType
	NestedFilter    *NestedFilter
}

// ProtoBuffer converts the sorter to its protobuf form; optional parts are
// only set when present.
func (s *GeoDistanceSort) ProtoBuffer() (*otsprotocol.Sorter, error) {
	pbGeoDistanceSort := &otsprotocol.GeoDistanceSort{
		FieldName: &s.FieldName,
		Points:    s.Points,
	}
	if s.Order != nil {
		pbOrder, err := s.Order.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pbGeoDistanceSort.Order = pbOrder
	}
	if s.Mode != nil {
		pbMode, err := s.Mode.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		if pbMode != nil {
			pbGeoDistanceSort.Mode = pbMode
		}
	}
	if s.GeoDistanceType != nil {
		pbGeoDisType, err := s.GeoDistanceType.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pbGeoDistanceSort.DistanceType = pbGeoDisType
	}
	if s.NestedFilter != nil {
		pbFilter, err := s.NestedFilter.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pbGeoDistanceSort.NestedFilter = pbFilter
	}
	pbSorter := &otsprotocol.Sorter{
		GeoDistanceSort: pbGeoDistanceSort,
	}
	return pbSorter, nil
}

View File

@ -0,0 +1,36 @@
package search
import (
"errors"
"fmt"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
)
// SortMode selects which value of a multi-valued field is used for sorting.
type SortMode int8

const (
	SortMode_Min SortMode = 0
	SortMode_Max SortMode = 1
	SortMode_Avg SortMode = 2
)

// Enum returns a pointer to a copy of x.
func (x SortMode) Enum() *SortMode {
	v := x
	return &v
}

// ProtoBuffer maps the mode to its protobuf enum value; a nil receiver or
// an undefined value yields an error.
func (m *SortMode) ProtoBuffer() (*otsprotocol.SortMode, error) {
	if m == nil {
		return nil, errors.New("sort mode is nil")
	}
	switch *m {
	case SortMode_Min:
		return otsprotocol.SortMode_SORT_MODE_MIN.Enum(), nil
	case SortMode_Max:
		return otsprotocol.SortMode_SORT_MODE_MAX.Enum(), nil
	case SortMode_Avg:
		return otsprotocol.SortMode_SORT_MODE_AVG.Enum(), nil
	default:
		return nil, errors.New("unknown sort mode: " + fmt.Sprintf("%#v", *m))
	}
}

View File

@ -0,0 +1,47 @@
package search
import (
"errors"
"fmt"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
)
// SortOrder selects ascending or descending ordering.
type SortOrder int8

const (
	SortOrder_ASC  SortOrder = 0
	SortOrder_DESC SortOrder = 1
)

// Enum returns a pointer to a copy of x.
func (x SortOrder) Enum() *SortOrder {
	v := x
	return &v
}

// ProtoBuffer maps the order to its protobuf enum value; a nil receiver or
// an undefined value yields an error.
func (o *SortOrder) ProtoBuffer() (*otsprotocol.SortOrder, error) {
	if o == nil {
		return nil, errors.New("sort order is nil")
	}
	switch *o {
	case SortOrder_ASC:
		return otsprotocol.SortOrder_SORT_ORDER_ASC.Enum(), nil
	case SortOrder_DESC:
		return otsprotocol.SortOrder_SORT_ORDER_DESC.Enum(), nil
	default:
		return nil, errors.New("unknown sort order: " + fmt.Sprintf("%#v", *o))
	}
}

// ParseSortOrder converts a protobuf sort order back to a SortOrder; nil in
// or an unrecognized value yields nil.
func ParseSortOrder(order *otsprotocol.SortOrder) *SortOrder {
	if order == nil {
		return nil
	}
	switch *order {
	case otsprotocol.SortOrder_SORT_ORDER_ASC:
		return SortOrder_ASC.Enum()
	case otsprotocol.SortOrder_SORT_ORDER_DESC:
		return SortOrder_DESC.Enum()
	default:
		return nil
	}
}

View File

@ -0,0 +1,28 @@
package search
import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
// PrimaryKeySort orders search results by primary key.
type PrimaryKeySort struct {
	Order *SortOrder
}

// NewPrimaryKeySort returns an ascending primary-key sorter.
func NewPrimaryKeySort() *PrimaryKeySort {
	return &PrimaryKeySort{Order: SortOrder_ASC.Enum()}
}

// ProtoBuffer converts the sorter to its protobuf form; Order is only set
// when present.
func (s *PrimaryKeySort) ProtoBuffer() (*otsprotocol.Sorter, error) {
	pb := &otsprotocol.PrimaryKeySort{}
	if s.Order != nil {
		order, err := s.Order.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pb.Order = order
	}
	return &otsprotocol.Sorter{PkSort: pb}, nil
}

View File

@ -0,0 +1,28 @@
package search
import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
// ScoreSort orders search results by relevance score.
type ScoreSort struct {
	Order *SortOrder
}

// NewScoreSort returns a descending (best-first) score sorter.
func NewScoreSort() *ScoreSort {
	return &ScoreSort{Order: SortOrder_DESC.Enum()}
}

// ProtoBuffer converts the sorter to its protobuf form; Order is only set
// when present.
func (s *ScoreSort) ProtoBuffer() (*otsprotocol.Sorter, error) {
	pb := &otsprotocol.ScoreSort{}
	if s.Order != nil {
		order, err := s.Order.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pb.Order = order
	}
	return &otsprotocol.Sorter{ScoreSort: pb}, nil
}

View File

@ -0,0 +1,74 @@
package search
import (
"encoding/binary"
"errors"
"math"
"reflect"
)
// VariantValue is the tagged little-endian binary encoding used for search
// query operands: a one-byte VariantType tag followed by the payload.
type VariantValue []byte

// VariantType is the one-byte tag identifying a VariantValue's payload.
type VariantType byte

const (
	// variant type tags (first byte of a VariantValue)
	VT_INTEGER VariantType = 0x0
	VT_DOUBLE  VariantType = 0x1
	VT_BOOLEAN VariantType = 0x2
	VT_STRING  VariantType = 0x3
)

// ToVariantValue encodes a Go value (string, int, int64, float64 or bool)
// as a VariantValue. It returns an error for nil input and for unsupported
// types.
func ToVariantValue(value interface{}) (VariantValue, error) {
	if value == nil {
		// BUG FIX: reflect.TypeOf(nil) returns nil, so t.Kind() below used
		// to panic with a nil-pointer dereference on nil input.
		return nil, errors.New("cannot convert nil to a variant value")
	}
	t := reflect.TypeOf(value)
	switch t.Kind() {
	case reflect.String:
		return VTString(value.(string)), nil
	case reflect.Int:
		return VTInteger(int64(value.(int))), nil
	case reflect.Int64:
		return VTInteger(value.(int64)), nil
	case reflect.Float64:
		return VTDouble(value.(float64)), nil
	case reflect.Bool:
		return VTBoolean(value.(bool)), nil
	default:
		// BUG FIX: the old message claimed only string/int64/float64 were
		// accepted, but int and bool are supported too.
		return nil, errors.New("value type must be string, int, int64, float64 or bool")
	}
}

// GetType returns the type tag stored in the first byte.
func (v *VariantValue) GetType() VariantType {
	return VariantType(([]byte)(*v)[0])
}

// VTInteger encodes v as tag byte + 8-byte little-endian integer.
func VTInteger(v int64) VariantValue {
	buf := make([]byte, 9)
	buf[0] = byte(VT_INTEGER)
	binary.LittleEndian.PutUint64(buf[1:9], uint64(v))
	return (VariantValue)(buf)
}

// VTDouble encodes v as tag byte + 8-byte little-endian IEEE-754 bits.
func VTDouble(v float64) VariantValue {
	buf := make([]byte, 9)
	buf[0] = byte(VT_DOUBLE)
	binary.LittleEndian.PutUint64(buf[1:9], math.Float64bits(v))
	return (VariantValue)(buf)
}

// VTString encodes v as tag byte + 4-byte little-endian length + raw bytes.
func VTString(v string) VariantValue {
	buf := make([]byte, 5+len(v))
	buf[0] = byte(VT_STRING)
	binary.LittleEndian.PutUint32(buf[1:5], uint32(len(v)))
	copy(buf[5:], v)
	return (VariantValue)(buf)
}

// VTBoolean encodes b as tag byte + one byte (1 for true, 0 for false).
func VTBoolean(b bool) VariantValue {
	buf := make([]byte, 2)
	buf[0] = byte(VT_BOOLEAN)
	if b {
		buf[1] = 1
	} else {
		buf[1] = 0
	}
	return (VariantValue)(buf)
}

View File

@ -0,0 +1,136 @@
package tablestore
import (
"bytes"
"errors"
"fmt"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/golang/protobuf/proto"
)
// CreateSearchIndex creates the search index named request.IndexName on
// table request.TableName using the given index schema.
func (tableStoreClient *TableStoreClient) CreateSearchIndex(request *CreateSearchIndexRequest) (*CreateSearchIndexResponse, error) {
	req := new(otsprotocol.CreateSearchIndexRequest)
	req.TableName = proto.String(request.TableName)
	req.IndexName = proto.String(request.IndexName)
	var err error
	req.Schema, err = convertToPbSchema(request.IndexSchema)
	if err != nil {
		return nil, err
	}
	// BUG FIX: the server response was previously decoded into a
	// CreateSearchIndexRequest message; use the response type.
	resp := new(otsprotocol.CreateSearchIndexResponse)
	response := &CreateSearchIndexResponse{}
	if err := tableStoreClient.doRequestWithRetry(createSearchIndexUri, req, resp, &response.ResponseInfo); err != nil {
		return nil, err
	}
	return response, nil
}
// DeleteSearchIndex removes the search index request.IndexName from table
// request.TableName.
func (tableStoreClient *TableStoreClient) DeleteSearchIndex(request *DeleteSearchIndexRequest) (*DeleteSearchIndexResponse, error) {
	req := &otsprotocol.DeleteSearchIndexRequest{
		TableName: proto.String(request.TableName),
		IndexName: proto.String(request.IndexName),
	}
	resp := new(otsprotocol.DeleteSearchIndexResponse)
	response := new(DeleteSearchIndexResponse)
	if err := tableStoreClient.doRequestWithRetry(deleteSearchIndexUri, req, resp, &response.ResponseInfo); err != nil {
		return nil, err
	}
	return response, nil
}

// ListSearchIndex lists all search indexes defined on request.TableName.
func (tableStoreClient *TableStoreClient) ListSearchIndex(request *ListSearchIndexRequest) (*ListSearchIndexResponse, error) {
	req := &otsprotocol.ListSearchIndexRequest{
		TableName: proto.String(request.TableName),
	}
	resp := new(otsprotocol.ListSearchIndexResponse)
	response := new(ListSearchIndexResponse)
	if err := tableStoreClient.doRequestWithRetry(listSearchIndexUri, req, resp, &response.ResponseInfo); err != nil {
		return nil, err
	}
	infos := make([]*IndexInfo, 0)
	for _, idx := range resp.Indices {
		infos = append(infos, &IndexInfo{
			TableName: *idx.TableName,
			IndexName: *idx.IndexName,
		})
	}
	response.IndexInfo = infos
	return response, nil
}
// DescribeSearchIndex fetches the schema and (when present) the sync state
// of the search index request.IndexName on table request.TableName.
func (tableStoreClient *TableStoreClient) DescribeSearchIndex(request *DescribeSearchIndexRequest) (*DescribeSearchIndexResponse, error) {
	req := new(otsprotocol.DescribeSearchIndexRequest)
	req.TableName = proto.String(request.TableName)
	req.IndexName = proto.String(request.IndexName)
	resp := new(otsprotocol.DescribeSearchIndexResponse)
	response := &DescribeSearchIndexResponse{}
	if err := tableStoreClient.doRequestWithRetry(describeSearchIndexUri, req, resp, &response.ResponseInfo); err != nil {
		return nil, err
	}
	schema, err := parseFromPbSchema(resp.Schema)
	if err != nil {
		return nil, err
	}
	response.Schema = schema
	if resp.SyncStat != nil {
		response.SyncStat = &SyncStat{
			CurrentSyncTimestamp: resp.SyncStat.CurrentSyncTimestamp,
		}
		syncPhase := resp.SyncStat.SyncPhase
		switch {
		case syncPhase == nil:
			return nil, errors.New("missing [SyncPhase] in DescribeSearchIndexResponse")
		case *syncPhase == otsprotocol.SyncPhase_FULL:
			response.SyncStat.SyncPhase = SyncPhase_FULL
		case *syncPhase == otsprotocol.SyncPhase_INCR:
			response.SyncStat.SyncPhase = SyncPhase_INCR
		default:
			// BUG FIX: was errors.New(fmt.Sprintf(...)) formatting the
			// *pointer*, which printed an address; use fmt.Errorf and
			// dereference so the enum value is reported.
			return nil, fmt.Errorf("unknown SyncPhase: %v", *syncPhase)
		}
	}
	return response, nil
}
// Search executes a search-index query and decodes the plain-buffer encoded
// result rows into Row values (primary key plus attribute columns).
func (tableStoreClient *TableStoreClient) Search(request *SearchRequest) (*SearchResponse, error) {
	req, err := request.ProtoBuffer()
	if err != nil {
		return nil, err
	}
	resp := new(otsprotocol.SearchResponse)
	response := &SearchResponse{}
	if err := tableStoreClient.doRequestWithRetry(searchUri, req, resp, &response.ResponseInfo); err != nil {
		return nil, err
	}
	response.TotalCount = *resp.TotalHits
	rows := make([]*PlainBufferRow, 0)
	for _, buf := range resp.Rows {
		// Each entry in resp.Rows is one plain-buffer payload.
		row, err := readRowsWithHeader(bytes.NewReader(buf))
		if err != nil {
			return nil, err
		}
		// NOTE(review): row[0] assumes every payload decodes to at least
		// one row; an empty decode would panic — confirm the server
		// guarantees this.
		rows = append(rows, row[0])
	}
	for _, row := range rows {
		currentRow := &Row{}
		currentPk := new(PrimaryKey)
		for _, pk := range row.primaryKey {
			pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value}
			currentPk.PrimaryKeys = append(currentPk.PrimaryKeys, pkColumn)
		}
		currentRow.PrimaryKey = currentPk
		for _, cell := range row.cells {
			dataColumn := &AttributeColumn{ColumnName: string(cell.cellName), Value: cell.cellValue.Value, Timestamp: cell.cellTimestamp}
			currentRow.Columns = append(currentRow.Columns, dataColumn)
		}
		response.Rows = append(response.Rows, currentRow)
	}
	response.IsAllSuccess = *resp.IsAllSucceeded
	if resp.NextToken != nil && len(resp.NextToken) > 0 {
		response.NextToken = resp.NextToken
	}
	return response, nil
}

View File

@ -0,0 +1,327 @@
package tablestore
import (
"encoding/json"
"errors"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search"
"github.com/golang/protobuf/proto"
)
type ColumnsToGet struct {
Columns []string
ReturnAll bool
}
type SearchRequest struct {
TableName string
IndexName string
SearchQuery search.SearchQuery
ColumnsToGet *ColumnsToGet
RoutingValues []*PrimaryKey
}
func (r *SearchRequest) SetTableName(tableName string) *SearchRequest {
r.TableName = tableName
return r
}
func (r *SearchRequest) SetIndexName(indexName string) *SearchRequest {
r.IndexName = indexName
return r
}
func (r *SearchRequest) SetSearchQuery(searchQuery search.SearchQuery) *SearchRequest {
r.SearchQuery = searchQuery
return r
}
func (r *SearchRequest) SetColumnsToGet(columnToGet *ColumnsToGet) *SearchRequest {
r.ColumnsToGet = columnToGet
return r
}
func (r *SearchRequest) SetRoutingValues(routingValues []*PrimaryKey) *SearchRequest {
r.RoutingValues = routingValues
return r
}
func (r *SearchRequest) AddRoutingValue(routingValue *PrimaryKey) *SearchRequest {
r.RoutingValues = append(r.RoutingValues, routingValue)
return r
}
func (r *SearchRequest) ProtoBuffer() (*otsprotocol.SearchRequest, error) {
req := &otsprotocol.SearchRequest{}
req.TableName = &r.TableName
req.IndexName = &r.IndexName
query, err := r.SearchQuery.Serialize()
if err != nil {
return nil, err
}
req.SearchQuery = query
pbColumns := &otsprotocol.ColumnsToGet{}
pbColumns.ReturnType = otsprotocol.ColumnReturnType_RETURN_NONE.Enum()
if r.ColumnsToGet != nil {
if r.ColumnsToGet.ReturnAll {
pbColumns.ReturnType = otsprotocol.ColumnReturnType_RETURN_ALL.Enum()
} else if len(r.ColumnsToGet.Columns) > 0 {
pbColumns.ReturnType = otsprotocol.ColumnReturnType_RETURN_SPECIFIED.Enum()
pbColumns.ColumnNames = r.ColumnsToGet.Columns
}
}
req.ColumnsToGet = pbColumns
if r.RoutingValues != nil {
for _, routingValue := range r.RoutingValues {
req.RoutingValues = append(req.RoutingValues, routingValue.Build(false))
}
}
return req, err
}
// SearchResponse carries the result of a search request: the server-reported
// total match count, the returned rows, a completion flag and, when present,
// a continuation token (NextToken presumably resumes the search from where
// this page ended — confirm against the service documentation).
type SearchResponse struct {
	TotalCount int64
	Rows []*Row
	IsAllSuccess bool
	NextToken []byte
	ResponseInfo
}
// convertFieldSchemaToPBFieldSchema recursively translates SDK field schemas
// into their protobuf counterparts, filling in client-side defaults for
// fields the caller left unset: non-NESTED fields are indexed by default,
// and Store defaults to false for TEXT fields and true for everything else.
// NOTE(review): assumes every slice entry and its FieldName are non-nil —
// *value.FieldName would panic otherwise; confirm callers guarantee this.
func convertFieldSchemaToPBFieldSchema(fieldSchemas []*FieldSchema) []*otsprotocol.FieldSchema {
	var schemas []*otsprotocol.FieldSchema
	for _, value := range fieldSchemas {
		field := new(otsprotocol.FieldSchema)
		field.FieldName = proto.String(*value.FieldName)
		field.FieldType = otsprotocol.FieldType(int32(value.FieldType)).Enum()
		if value.Index != nil {
			field.Index = proto.Bool(*value.Index)
		} else if value.FieldType != FieldType_NESTED {
			// Default: index every non-nested field.
			field.Index = proto.Bool(true)
		}
		if value.IndexOptions != nil {
			field.IndexOptions = otsprotocol.IndexOptions(int32(*value.IndexOptions)).Enum()
		}
		if value.Analyzer != nil {
			field.Analyzer = proto.String(string(*value.Analyzer))
		}
		if value.EnableSortAndAgg != nil {
			// EnableSortAndAgg maps onto the protobuf DocValues flag.
			field.DocValues = proto.Bool(*value.EnableSortAndAgg)
		}
		if value.Store != nil {
			field.Store = proto.Bool(*value.Store)
		} else if value.FieldType != FieldType_NESTED {
			// Default: store all non-nested fields except TEXT.
			if *field.FieldType == otsprotocol.FieldType_TEXT {
				field.Store = proto.Bool(false)
			} else {
				field.Store = proto.Bool(true)
			}
		}
		if value.IsArray != nil {
			field.IsArray = proto.Bool(*value.IsArray)
		}
		if value.FieldType == FieldType_NESTED {
			// Nested fields carry their sub-schemas; recurse.
			field.FieldSchemas = convertFieldSchemaToPBFieldSchema(value.FieldSchemas)
		}
		schemas = append(schemas, field)
	}
	return schemas
}
// convertToPbSchema translates an SDK IndexSchema into its protobuf form.
// The number of shards is always set to 1; routing fields and the index
// sort are copied over when the caller provided them. An error is returned
// only if the index sort fails to serialize.
func convertToPbSchema(schema *IndexSchema) (*otsprotocol.IndexSchema, error) {
	pb := &otsprotocol.IndexSchema{
		FieldSchemas: convertFieldSchemaToPBFieldSchema(schema.FieldSchemas),
		IndexSetting: new(otsprotocol.IndexSetting),
	}
	shards := int32(1)
	pb.IndexSetting.NumberOfShards = &shards
	if schema.IndexSetting != nil {
		pb.IndexSetting.RoutingFields = schema.IndexSetting.RoutingFields
	}
	if schema.IndexSort != nil {
		sortPb, err := schema.IndexSort.ProtoBuffer()
		if err != nil {
			return nil, err
		}
		pb.IndexSort = sortPb
	}
	return pb, nil
}
// parseFieldSchemaFromPb recursively translates protobuf field schemas back
// into the SDK representation. It is the inverse of
// convertFieldSchemaToPBFieldSchema, minus the defaulting logic: optional
// flags the server omitted stay nil.
func parseFieldSchemaFromPb(pbFieldSchemas []*otsprotocol.FieldSchema) []*FieldSchema {
	var schemas []*FieldSchema
	for _, value := range pbFieldSchemas {
		field := new(FieldSchema)
		field.FieldName = value.FieldName
		// NOTE(review): assumes FieldType is always set in server responses —
		// *value.FieldType would panic on nil; confirm.
		field.FieldType = FieldType(*value.FieldType)
		field.Index = value.Index
		if value.IndexOptions != nil {
			indexOption := IndexOptions(*value.IndexOptions)
			field.IndexOptions = &indexOption
		}
		field.Analyzer = (*Analyzer)(value.Analyzer)
		// Protobuf DocValues maps back onto EnableSortAndAgg.
		field.EnableSortAndAgg = value.DocValues
		field.Store = value.Store
		field.IsArray = value.IsArray
		if field.FieldType == FieldType_NESTED {
			field.FieldSchemas = parseFieldSchemaFromPb(value.FieldSchemas)
		}
		schemas = append(schemas, field)
	}
	return schemas
}
// parseIndexSortFromPb converts a protobuf Sort into the SDK's search.Sort.
// Field sorters and primary-key sorters are supported; any other sorter
// type yields an error. A nil input produces an empty (non-nil) sort,
// since the generated GetSorter accessor is nil-safe.
func parseIndexSortFromPb(pbIndexSort *otsprotocol.Sort) (*search.Sort, error) {
	indexSort := &search.Sort{
		Sorters: make([]search.Sorter, 0),
	}
	for _, sorter := range pbIndexSort.GetSorter() {
		if sorter.GetFieldSort() != nil {
			fieldSort := &search.FieldSort{
				// NOTE(review): assumes FieldName is set on every field sorter —
				// the dereference panics on nil; confirm server contract.
				FieldName: *sorter.GetFieldSort().FieldName,
				Order: search.ParseSortOrder(sorter.GetFieldSort().Order),
			}
			indexSort.Sorters = append(indexSort.Sorters, fieldSort)
		} else if sorter.GetPkSort() != nil {
			pkSort := &search.PrimaryKeySort{
				Order: search.ParseSortOrder(sorter.GetPkSort().Order),
			}
			indexSort.Sorters = append(indexSort.Sorters, pkSort)
		} else {
			return nil, errors.New("unknown index sort type")
		}
	}
	return indexSort, nil
}
// parseFromPbSchema converts a wire-format index schema into the SDK's
// IndexSchema: field schemas, routing-field settings and the optional
// index sort. It returns an error if the index sort contains a sorter of
// unknown type.
func parseFromPbSchema(pbSchema *otsprotocol.IndexSchema) (*IndexSchema, error) {
	schema := &IndexSchema{
		IndexSetting: &IndexSetting{
			// Use the nil-safe generated getters: the original direct field
			// access (pbSchema.IndexSetting.RoutingFields) panics when the
			// server response omits IndexSetting.
			RoutingFields: pbSchema.GetIndexSetting().GetRoutingFields(),
		},
	}
	schema.FieldSchemas = parseFieldSchemaFromPb(pbSchema.GetFieldSchemas())
	indexSort, err := parseIndexSortFromPb(pbSchema.GetIndexSort())
	if err != nil {
		return nil, err
	}
	schema.IndexSort = indexSort
	return schema, nil
}
// IndexSchema describes a search index: its settings (routing fields),
// the schemas of its fields, and an optional pre-sort applied to the index.
type IndexSchema struct {
	IndexSetting *IndexSetting
	FieldSchemas []*FieldSchema
	IndexSort *search.Sort
}
// FieldType enumerates the data types a search-index field can hold.
// The numeric values are passed straight through to otsprotocol.FieldType
// (see convertFieldSchemaToPBFieldSchema).
type FieldType int32
const (
	FieldType_LONG FieldType = 1
	FieldType_DOUBLE FieldType = 2
	FieldType_BOOLEAN FieldType = 3
	FieldType_KEYWORD FieldType = 4
	FieldType_TEXT FieldType = 5
	FieldType_NESTED FieldType = 6
	FieldType_GEO_POINT FieldType = 7
)
// IndexOptions enumerates what positional information the index records
// for a field; values map straight onto otsprotocol.IndexOptions.
type IndexOptions int32
const (
	IndexOptions_DOCS IndexOptions = 1
	IndexOptions_FREQS IndexOptions = 2
	IndexOptions_POSITIONS IndexOptions = 3
	IndexOptions_OFFSETS IndexOptions = 4
)
// Analyzer names the text analyzer applied to a TEXT field.
type Analyzer string
const (
	Analyzer_SingleWord Analyzer = "single_word"
	Analyzer_MaxWord Analyzer = "max_word"
)
// FieldSchema describes one field of a search index. Pointer fields are
// optional: a nil value means "unset", and client-side defaults are applied
// during protobuf conversion (see convertFieldSchemaToPBFieldSchema).
// FieldSchemas holds sub-fields and is only meaningful for NESTED fields.
type FieldSchema struct {
	FieldName *string
	FieldType FieldType
	Index *bool
	IndexOptions *IndexOptions
	Analyzer *Analyzer
	EnableSortAndAgg *bool
	Store *bool
	IsArray *bool
	FieldSchemas []*FieldSchema
}
// String renders the schema as JSON. It panics if marshalling fails,
// which cannot happen for this plain-data struct under normal conditions.
func (fs *FieldSchema) String() string {
	data, marshalErr := json.Marshal(fs)
	if marshalErr != nil {
		panic(marshalErr)
	}
	return string(data)
}
// IndexSetting holds index-level configuration; currently only the
// routing fields are exposed.
type IndexSetting struct {
	RoutingFields []string
}
// CreateSearchIndexRequest names the search index to create and the
// schema it should have.
type CreateSearchIndexRequest struct {
	TableName string
	IndexName string
	IndexSchema *IndexSchema
}
// CreateSearchIndexResponse carries only the generic response metadata.
type CreateSearchIndexResponse struct {
	ResponseInfo ResponseInfo
}
// DescribeSearchIndexRequest identifies the search index to describe.
type DescribeSearchIndexRequest struct {
	TableName string
	IndexName string
}
// SyncPhase indicates which stage of data synchronization a search index
// is in: the initial full build or incremental updates.
type SyncPhase int32
const (
	SyncPhase_FULL SyncPhase = 1
	SyncPhase_INCR SyncPhase = 2
)
// SyncStat reports the synchronization state of a search index; the
// timestamp is optional (nil when not reported by the server).
type SyncStat struct {
	SyncPhase SyncPhase
	CurrentSyncTimestamp *int64
}
// DescribeSearchIndexResponse returns the index's schema and sync state.
type DescribeSearchIndexResponse struct {
	Schema *IndexSchema
	SyncStat *SyncStat
	ResponseInfo ResponseInfo
}
// ListSearchIndexRequest names the table whose search indexes to list.
type ListSearchIndexRequest struct {
	TableName string
}
// IndexInfo identifies one search index by table and index name.
type IndexInfo struct {
	TableName string
	IndexName string
}
// ListSearchIndexResponse lists the search indexes found on the table.
type ListSearchIndexResponse struct {
	IndexInfo []*IndexInfo
	ResponseInfo ResponseInfo
}
// DeleteSearchIndexRequest identifies the search index to delete.
type DeleteSearchIndexRequest struct {
	TableName string
	IndexName string
}
// DeleteSearchIndexResponse carries only the generic response metadata.
type DeleteSearchIndexResponse struct {
	ResponseInfo ResponseInfo
}

Some files were not shown because too many files have changed in this diff Show More