package aws

import (
	"bytes"
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/dynamodb"
	"github.com/hashicorp/terraform/helper/hashcode"
)

// Number of times to retry if a throttling-related exception occurs
const DYNAMODB_MAX_THROTTLE_RETRIES = 5

// How long to sleep when a throttle-event happens
const DYNAMODB_THROTTLE_SLEEP = 5 * time.Second

// How long to sleep if a limit-exceeded event happens
const DYNAMODB_LIMIT_EXCEEDED_SLEEP = 10 * time.Second
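
// Example configuration handled by this resource (an illustrative sketch only;
// the table and attribute names are made up, and the resource type assumes the
// usual "aws_dynamodb_table" registration in the provider):
//
//	resource "aws_dynamodb_table" "example" {
//	  name           = "example-table"
//	  hash_key       = "UserId"
//	  read_capacity  = 5
//	  write_capacity = 5
//
//	  attribute {
//	    name = "UserId"
//	    type = "S"
//	  }
//	}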

// A number of these are marked as computed because if you don't
// provide a value, DynamoDB will provide you with defaults (which are the
// default values specified below).
func resourceAwsDynamoDbTable() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsDynamoDbTableCreate,
		Read:   resourceAwsDynamoDbTableRead,
		Update: resourceAwsDynamoDbTableUpdate,
		Delete: resourceAwsDynamoDbTableDelete,

		Schema: map[string]*schema.Schema{
			"arn": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"hash_key": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"range_key": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},
			"write_capacity": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},
			"read_capacity": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},
			"attribute": &schema.Schema{
				Type:     schema.TypeSet,
				Required: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"name": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"type": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
					},
				},
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
					return hashcode.String(buf.String())
				},
			},
			"local_secondary_index": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"name": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"range_key": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"projection_type": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"non_key_attributes": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
					},
				},
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
					return hashcode.String(buf.String())
				},
			},
			"global_secondary_index": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"name": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"write_capacity": &schema.Schema{
							Type:     schema.TypeInt,
							Required: true,
						},
						"read_capacity": &schema.Schema{
							Type:     schema.TypeInt,
							Required: true,
						},
						"hash_key": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"range_key": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},
						"projection_type": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"non_key_attributes": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
					},
				},
				// GSI names are the uniqueness constraint; capacity values are
				// included in the hash so that a capacity-only change still
				// shows up as a diff on the set (the Update function works out
				// whether an index actually needs to be created, deleted, or
				// just resized).
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
					buf.WriteString(fmt.Sprintf("%d-", m["write_capacity"].(int)))
					buf.WriteString(fmt.Sprintf("%d-", m["read_capacity"].(int)))
					return hashcode.String(buf.String())
				},
			},
			"stream_enabled": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Computed: true,
			},
			"stream_view_type": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				StateFunc: func(v interface{}) string {
					value := v.(string)
					return strings.ToUpper(value)
				},
				ValidateFunc: validateStreamViewType,
			},
		},
	}
}

func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) error {
	dynamodbconn := meta.(*AWSClient).dynamodbconn

	name := d.Get("name").(string)

	log.Printf("[DEBUG] DynamoDB table create: %s", name)

	throughput := &dynamodb.ProvisionedThroughput{
		ReadCapacityUnits:  aws.Int64(int64(d.Get("read_capacity").(int))),
		WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))),
	}

	hash_key_name := d.Get("hash_key").(string)
	keyschema := []*dynamodb.KeySchemaElement{
		&dynamodb.KeySchemaElement{
			AttributeName: aws.String(hash_key_name),
			KeyType:       aws.String("HASH"),
		},
	}

	if range_key, ok := d.GetOk("range_key"); ok {
		range_schema_element := &dynamodb.KeySchemaElement{
			AttributeName: aws.String(range_key.(string)),
			KeyType:       aws.String("RANGE"),
		}
		keyschema = append(keyschema, range_schema_element)
	}

	req := &dynamodb.CreateTableInput{
		TableName:             aws.String(name),
		ProvisionedThroughput: throughput,
		KeySchema:             keyschema,
	}

	if attributedata, ok := d.GetOk("attribute"); ok {
		attributes := []*dynamodb.AttributeDefinition{}
		attributeSet := attributedata.(*schema.Set)
		for _, attribute := range attributeSet.List() {
			attr := attribute.(map[string]interface{})
			attributes = append(attributes, &dynamodb.AttributeDefinition{
				AttributeName: aws.String(attr["name"].(string)),
				AttributeType: aws.String(attr["type"].(string)),
			})
		}

		req.AttributeDefinitions = attributes
	}

	if lsidata, ok := d.GetOk("local_secondary_index"); ok {
		log.Printf("[DEBUG] Adding LSI data to the table")

		lsiSet := lsidata.(*schema.Set)
		localSecondaryIndexes := []*dynamodb.LocalSecondaryIndex{}
		for _, lsiObject := range lsiSet.List() {
			lsi := lsiObject.(map[string]interface{})

			projection := &dynamodb.Projection{
				ProjectionType: aws.String(lsi["projection_type"].(string)),
			}

			if lsi["projection_type"] == "INCLUDE" {
				non_key_attributes := []*string{}
				for _, attr := range lsi["non_key_attributes"].([]interface{}) {
					non_key_attributes = append(non_key_attributes, aws.String(attr.(string)))
				}
				projection.NonKeyAttributes = non_key_attributes
			}

			localSecondaryIndexes = append(localSecondaryIndexes, &dynamodb.LocalSecondaryIndex{
				IndexName: aws.String(lsi["name"].(string)),
				KeySchema: []*dynamodb.KeySchemaElement{
					&dynamodb.KeySchemaElement{
						AttributeName: aws.String(hash_key_name),
						KeyType:       aws.String("HASH"),
					},
					&dynamodb.KeySchemaElement{
						AttributeName: aws.String(lsi["range_key"].(string)),
						KeyType:       aws.String("RANGE"),
					},
				},
				Projection: projection,
			})
		}

		req.LocalSecondaryIndexes = localSecondaryIndexes

		log.Printf("[DEBUG] Added %d LSI definitions", len(localSecondaryIndexes))
	}

	if gsidata, ok := d.GetOk("global_secondary_index"); ok {
		globalSecondaryIndexes := []*dynamodb.GlobalSecondaryIndex{}

		gsiSet := gsidata.(*schema.Set)
		for _, gsiObject := range gsiSet.List() {
			gsi := gsiObject.(map[string]interface{})
			gsiObject := createGSIFromData(&gsi)
			globalSecondaryIndexes = append(globalSecondaryIndexes, &gsiObject)
		}
		req.GlobalSecondaryIndexes = globalSecondaryIndexes
	}

	if _, ok := d.GetOk("stream_enabled"); ok {
		req.StreamSpecification = &dynamodb.StreamSpecification{
			StreamEnabled:  aws.Bool(d.Get("stream_enabled").(bool)),
			StreamViewType: aws.String(d.Get("stream_view_type").(string)),
		}

		log.Printf("[DEBUG] Adding StreamSpecifications to the table")
	}
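
	// CreateTable is retried by hand: throttling and the account limit on
	// concurrent table creations are slept on and retried up to
	// DYNAMODB_MAX_THROTTLE_RETRIES times, while any other error aborts the
	// create immediately.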
	attemptCount := 1
	for attemptCount <= DYNAMODB_MAX_THROTTLE_RETRIES {
		output, err := dynamodbconn.CreateTable(req)
		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok {
				if awsErr.Code() == "ThrottlingException" {
					log.Printf("[DEBUG] Attempt %d/%d: Sleeping for a bit to throttle back create request", attemptCount, DYNAMODB_MAX_THROTTLE_RETRIES)
					time.Sleep(DYNAMODB_THROTTLE_SLEEP)
					attemptCount += 1
				} else if awsErr.Code() == "LimitExceededException" {
					log.Printf("[DEBUG] Limit on concurrent table creations hit, sleeping for a bit")
					time.Sleep(DYNAMODB_LIMIT_EXCEEDED_SLEEP)
					attemptCount += 1
				} else {
					// Some other non-retryable exception occurred
					return fmt.Errorf("AWS Error creating DynamoDB table: %s", err)
				}
			} else {
				// Non-AWS exception occurred, give up
				return fmt.Errorf("Error creating DynamoDB table: %s", err)
			}
		} else {
			// No error, set ID and return
			d.SetId(*output.TableDescription.TableName)
			if err := d.Set("arn", *output.TableDescription.TableArn); err != nil {
				return err
			}

			return resourceAwsDynamoDbTableRead(d, meta)
		}
	}

	// Too many throttling events occurred, give up
	return fmt.Errorf("Unable to create DynamoDB table '%s' after %d attempts", name, attemptCount)
}
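
// resourceAwsDynamoDbTableUpdate handles the pieces of the table that can
// change after creation: provisioned throughput, the stream specification and
// the set of global secondary indexes. The hash key, range key and local
// secondary indexes are create-time only, so changes to them are rejected
// below.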
func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) error {

	log.Printf("[DEBUG] Updating DynamoDB table %s", d.Id())
	dynamodbconn := meta.(*AWSClient).dynamodbconn

	// Ensure table is active before trying to update
	waitForTableToBeActive(d.Id(), meta)

	// LSI can only be done at create-time, abort if it's been changed
	if d.HasChange("local_secondary_index") {
		return fmt.Errorf("Local secondary indexes can only be built at creation, you cannot update them!")
	}

	if d.HasChange("hash_key") {
		return fmt.Errorf("Hash key can only be specified at creation, you cannot modify it.")
	}

	if d.HasChange("range_key") {
		return fmt.Errorf("Range key can only be specified at creation, you cannot modify it.")
	}

	if d.HasChange("read_capacity") || d.HasChange("write_capacity") {
		req := &dynamodb.UpdateTableInput{
			TableName: aws.String(d.Id()),
		}

		throughput := &dynamodb.ProvisionedThroughput{
			ReadCapacityUnits:  aws.Int64(int64(d.Get("read_capacity").(int))),
			WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))),
		}
		req.ProvisionedThroughput = throughput

		_, err := dynamodbconn.UpdateTable(req)

		if err != nil {
			return err
		}

		waitForTableToBeActive(d.Id(), meta)
	}

	if d.HasChange("stream_enabled") || d.HasChange("stream_view_type") {
		req := &dynamodb.UpdateTableInput{
			TableName: aws.String(d.Id()),
		}

		req.StreamSpecification = &dynamodb.StreamSpecification{
			StreamEnabled:  aws.Bool(d.Get("stream_enabled").(bool)),
			StreamViewType: aws.String(d.Get("stream_view_type").(string)),
		}

		_, err := dynamodbconn.UpdateTable(req)

		if err != nil {
			return err
		}

		waitForTableToBeActive(d.Id(), meta)
	}
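
	// GSI changes are applied in passes: first create indexes whose names only
	// appear in the new set, then delete indexes whose names only appear in
	// the old set. Capacity-only changes on surviving indexes are handled
	// further down, once the current throughput has been read back from
	// DescribeTable.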
	if d.HasChange("global_secondary_index") {
		log.Printf("[DEBUG] Changed GSI data")
		req := &dynamodb.UpdateTableInput{
			TableName: aws.String(d.Id()),
		}

		o, n := d.GetChange("global_secondary_index")

		oldSet := o.(*schema.Set)
		newSet := n.(*schema.Set)

		// Track the old and new index names. Terraform only diffs on the set
		// hash, not the contents, so we need this to tell a capacity-only
		// change apart from an index that really has to be created or deleted;
		// otherwise we would drop indexes we only meant to resize.
		oldGsiNameSet := make(map[string]bool)
		newGsiNameSet := make(map[string]bool)

		for _, gsidata := range oldSet.List() {
			gsiName := gsidata.(map[string]interface{})["name"].(string)
			oldGsiNameSet[gsiName] = true
		}

		for _, gsidata := range newSet.List() {
			gsiName := gsidata.(map[string]interface{})["name"].(string)
			newGsiNameSet[gsiName] = true
		}

		// First determine what's new
		for _, newgsidata := range newSet.List() {
			updates := []*dynamodb.GlobalSecondaryIndexUpdate{}
			newGsiName := newgsidata.(map[string]interface{})["name"].(string)
			if _, exists := oldGsiNameSet[newGsiName]; !exists {
				attributes := []*dynamodb.AttributeDefinition{}
				gsidata := newgsidata.(map[string]interface{})
				gsi := createGSIFromData(&gsidata)
				log.Printf("[DEBUG] Adding GSI %s", *gsi.IndexName)
				update := &dynamodb.GlobalSecondaryIndexUpdate{
					Create: &dynamodb.CreateGlobalSecondaryIndexAction{
						IndexName:             gsi.IndexName,
						KeySchema:             gsi.KeySchema,
						ProvisionedThroughput: gsi.ProvisionedThroughput,
						Projection:            gsi.Projection,
					},
				}
				updates = append(updates, update)

				// Hash key is required, range key isn't
				hashkey_type, err := getAttributeType(d, *gsi.KeySchema[0].AttributeName)
				if err != nil {
					return err
				}

				attributes = append(attributes, &dynamodb.AttributeDefinition{
					AttributeName: gsi.KeySchema[0].AttributeName,
					AttributeType: aws.String(hashkey_type),
				})

				// If there's a range key, there will be 2 elements in KeySchema
				if len(gsi.KeySchema) == 2 {
					rangekey_type, err := getAttributeType(d, *gsi.KeySchema[1].AttributeName)
					if err != nil {
						return err
					}

					attributes = append(attributes, &dynamodb.AttributeDefinition{
						AttributeName: gsi.KeySchema[1].AttributeName,
						AttributeType: aws.String(rangekey_type),
					})
				}

				req.AttributeDefinitions = attributes
				req.GlobalSecondaryIndexUpdates = updates
				_, err = dynamodbconn.UpdateTable(req)

				if err != nil {
					return err
				}

				waitForTableToBeActive(d.Id(), meta)
				waitForGSIToBeActive(d.Id(), *gsi.IndexName, meta)
			}
		}

		for _, oldgsidata := range oldSet.List() {
			updates := []*dynamodb.GlobalSecondaryIndexUpdate{}
			oldGsiName := oldgsidata.(map[string]interface{})["name"].(string)
			if _, exists := newGsiNameSet[oldGsiName]; !exists {
				gsidata := oldgsidata.(map[string]interface{})
				log.Printf("[DEBUG] Deleting GSI %s", gsidata["name"].(string))
				update := &dynamodb.GlobalSecondaryIndexUpdate{
					Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{
						IndexName: aws.String(gsidata["name"].(string)),
					},
				}
				updates = append(updates, update)

				req.GlobalSecondaryIndexUpdates = updates
				_, err := dynamodbconn.UpdateTable(req)

				if err != nil {
					return err
				}

				waitForTableToBeActive(d.Id(), meta)
			}
		}
	}

	// Update any out-of-date read / write capacity
	if gsiObjects, ok := d.GetOk("global_secondary_index"); ok {
		gsiSet := gsiObjects.(*schema.Set)
		if len(gsiSet.List()) > 0 {
			log.Printf("[DEBUG] Updating capacity as needed!")

			// We can only change throughput, but we need to make sure it's actually changed
			tableDescription, err := dynamodbconn.DescribeTable(&dynamodb.DescribeTableInput{
				TableName: aws.String(d.Id()),
			})

			if err != nil {
				return err
			}

			table := tableDescription.Table

			updates := []*dynamodb.GlobalSecondaryIndexUpdate{}

			for _, updatedgsidata := range gsiSet.List() {
				gsidata := updatedgsidata.(map[string]interface{})
				gsiName := gsidata["name"].(string)
				gsiWriteCapacity := gsidata["write_capacity"].(int)
				gsiReadCapacity := gsidata["read_capacity"].(int)

				log.Printf("[DEBUG] Updating GSI %s", gsiName)
				gsi, err := getGlobalSecondaryIndex(gsiName, table.GlobalSecondaryIndexes)

				if err != nil {
					return err
				}

				capacityUpdated := false

				if int64(gsiReadCapacity) != *gsi.ProvisionedThroughput.ReadCapacityUnits ||
					int64(gsiWriteCapacity) != *gsi.ProvisionedThroughput.WriteCapacityUnits {
					capacityUpdated = true
				}

				if capacityUpdated {
					update := &dynamodb.GlobalSecondaryIndexUpdate{
						Update: &dynamodb.UpdateGlobalSecondaryIndexAction{
							IndexName: aws.String(gsidata["name"].(string)),
							ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
								WriteCapacityUnits: aws.Int64(int64(gsiWriteCapacity)),
								ReadCapacityUnits:  aws.Int64(int64(gsiReadCapacity)),
							},
						},
					}
					updates = append(updates, update)
				}

				if len(updates) > 0 {
					req := &dynamodb.UpdateTableInput{
						TableName: aws.String(d.Id()),
					}

					req.GlobalSecondaryIndexUpdates = updates

					log.Printf("[DEBUG] Updating GSI read / write capacity on %s", d.Id())
					_, err := dynamodbconn.UpdateTable(req)

					if err != nil {
						log.Printf("[DEBUG] Error updating table: %s", err)
						return err
					}
				}
			}
		}
	}

	return resourceAwsDynamoDbTableRead(d, meta)
}
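
// resourceAwsDynamoDbTableRead flattens the DescribeTable response back into
// state: provisioned throughput, attribute definitions, global secondary
// indexes (recovering hash_key, range_key and the projection from the key
// schema), the stream settings when present, and the table ARN.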
func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) error {
	dynamodbconn := meta.(*AWSClient).dynamodbconn
	log.Printf("[DEBUG] Loading data for DynamoDB table '%s'", d.Id())
	req := &dynamodb.DescribeTableInput{
		TableName: aws.String(d.Id()),
	}

	result, err := dynamodbconn.DescribeTable(req)

	if err != nil {
		return err
	}

	table := result.Table

	d.Set("write_capacity", table.ProvisionedThroughput.WriteCapacityUnits)
	d.Set("read_capacity", table.ProvisionedThroughput.ReadCapacityUnits)

	attributes := []interface{}{}
	for _, attrdef := range table.AttributeDefinitions {
		attribute := map[string]string{
			"name": *attrdef.AttributeName,
			"type": *attrdef.AttributeType,
		}
		attributes = append(attributes, attribute)
		log.Printf("[DEBUG] Added Attribute: %s", attribute["name"])
	}

	d.Set("attribute", attributes)

	gsiList := make([]map[string]interface{}, 0, len(table.GlobalSecondaryIndexes))
	for _, gsiObject := range table.GlobalSecondaryIndexes {
		gsi := map[string]interface{}{
			"write_capacity": *gsiObject.ProvisionedThroughput.WriteCapacityUnits,
			"read_capacity":  *gsiObject.ProvisionedThroughput.ReadCapacityUnits,
			"name":           *gsiObject.IndexName,
		}

		for _, attribute := range gsiObject.KeySchema {
			if *attribute.KeyType == "HASH" {
				gsi["hash_key"] = *attribute.AttributeName
			}

			if *attribute.KeyType == "RANGE" {
				gsi["range_key"] = *attribute.AttributeName
			}
		}

		gsi["projection_type"] = *(gsiObject.Projection.ProjectionType)

		nonKeyAttrs := make([]string, 0, len(gsiObject.Projection.NonKeyAttributes))
		for _, nonKeyAttr := range gsiObject.Projection.NonKeyAttributes {
			nonKeyAttrs = append(nonKeyAttrs, *nonKeyAttr)
		}
		gsi["non_key_attributes"] = nonKeyAttrs

		gsiList = append(gsiList, gsi)
		log.Printf("[DEBUG] Added GSI: %s - Read: %d / Write: %d", gsi["name"], gsi["read_capacity"], gsi["write_capacity"])
	}

	if table.StreamSpecification != nil {
		d.Set("stream_view_type", table.StreamSpecification.StreamViewType)
		d.Set("stream_enabled", table.StreamSpecification.StreamEnabled)
	}

	err = d.Set("global_secondary_index", gsiList)
	if err != nil {
		return err
	}

	d.Set("arn", table.TableArn)

	return nil
}

func resourceAwsDynamoDbTableDelete(d *schema.ResourceData, meta interface{}) error {
	dynamodbconn := meta.(*AWSClient).dynamodbconn

	waitForTableToBeActive(d.Id(), meta)

	log.Printf("[DEBUG] DynamoDB delete table: %s", d.Id())

	_, err := dynamodbconn.DeleteTable(&dynamodb.DeleteTableInput{
		TableName: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}
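
	// The table is not gone the moment DeleteTable returns, so poll
	// DescribeTable for up to 10 minutes until it reports
	// ResourceNotFoundException; anything still in the DELETING state keeps
	// the retry loop going.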
	params := &dynamodb.DescribeTableInput{
		TableName: aws.String(d.Id()),
	}

	err = resource.Retry(10*time.Minute, func() error {
		t, err := dynamodbconn.DescribeTable(params)
		if err != nil {
			if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" {
				return nil
			}
			// Didn't recognize the error, so shouldn't retry.
			return resource.RetryError{Err: err}
		}

		if t != nil {
			if t.Table.TableStatus != nil && strings.ToLower(*t.Table.TableStatus) == "deleting" {
				log.Printf("[DEBUG] AWS Dynamo DB table (%s) is still deleting", d.Id())
				return fmt.Errorf("still deleting")
			}
		}

		// we should be not found or deleting, so error here
		return resource.RetryError{Err: fmt.Errorf("[ERR] Error deleting Dynamo DB table, unexpected state: %s", t)}
	})

	// check error from retry
	if err != nil {
		return err
	}

	return nil
}
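
// createGSIFromData builds a dynamodb.GlobalSecondaryIndex from one element of
// the global_secondary_index set: the hash key is always part of the key
// schema, a range key is added only when one is set, and non_key_attributes
// are copied across for INCLUDE projections.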
func createGSIFromData(data *map[string]interface{}) dynamodb.GlobalSecondaryIndex {

	projection := &dynamodb.Projection{
		ProjectionType: aws.String((*data)["projection_type"].(string)),
	}

	if (*data)["projection_type"] == "INCLUDE" {
		non_key_attributes := []*string{}
		for _, attr := range (*data)["non_key_attributes"].([]interface{}) {
			non_key_attributes = append(non_key_attributes, aws.String(attr.(string)))
		}
		projection.NonKeyAttributes = non_key_attributes
	}

	writeCapacity := (*data)["write_capacity"].(int)
	readCapacity := (*data)["read_capacity"].(int)

	key_schema := []*dynamodb.KeySchemaElement{
		&dynamodb.KeySchemaElement{
			AttributeName: aws.String((*data)["hash_key"].(string)),
			KeyType:       aws.String("HASH"),
		},
	}

	range_key_name := (*data)["range_key"]
	if range_key_name != "" {
		range_key_element := &dynamodb.KeySchemaElement{
			AttributeName: aws.String(range_key_name.(string)),
			KeyType:       aws.String("RANGE"),
		}

		key_schema = append(key_schema, range_key_element)
	}

	return dynamodb.GlobalSecondaryIndex{
		IndexName:  aws.String((*data)["name"].(string)),
		KeySchema:  key_schema,
		Projection: projection,
		ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
			WriteCapacityUnits: aws.Int64(int64(writeCapacity)),
			ReadCapacityUnits:  aws.Int64(int64(readCapacity)),
		},
	}
}

func getGlobalSecondaryIndex(indexName string, indexList []*dynamodb.GlobalSecondaryIndexDescription) (*dynamodb.GlobalSecondaryIndexDescription, error) {
	for _, gsi := range indexList {
		if *gsi.IndexName == indexName {
			return gsi, nil
		}
	}

	return &dynamodb.GlobalSecondaryIndexDescription{}, fmt.Errorf("Can't find a GSI named %s", indexName)
}
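
// getAttributeType looks up the DynamoDB type (for example "S" or "N")
// declared for the given attribute name in the resource's "attribute" set.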
func getAttributeType(d *schema.ResourceData, attributeName string) (string, error) {
	if attributedata, ok := d.GetOk("attribute"); ok {
		attributeSet := attributedata.(*schema.Set)
		for _, attribute := range attributeSet.List() {
			attr := attribute.(map[string]interface{})
			if attr["name"] == attributeName {
				return attr["type"].(string), nil
			}
		}
	}

	return "", fmt.Errorf("Unable to find an attribute named %s", attributeName)
}
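
// waitForGSIToBeActive polls DescribeTable every five seconds until the named
// index reports an ACTIVE status, giving up early if the index is not present
// on the table.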
func waitForGSIToBeActive(tableName string, gsiName string, meta interface{}) error {
	dynamodbconn := meta.(*AWSClient).dynamodbconn
	req := &dynamodb.DescribeTableInput{
		TableName: aws.String(tableName),
	}

	activeIndex := false

	for !activeIndex {

		result, err := dynamodbconn.DescribeTable(req)

		if err != nil {
			return err
		}

		table := result.Table
		var targetGSI *dynamodb.GlobalSecondaryIndexDescription

		for _, gsi := range table.GlobalSecondaryIndexes {
			if *gsi.IndexName == gsiName {
				targetGSI = gsi
			}
		}

		if targetGSI != nil {
			activeIndex = *targetGSI.IndexStatus == "ACTIVE"

			if !activeIndex {
				log.Printf("[DEBUG] Sleeping for 5 seconds for %s GSI to become active", gsiName)
				time.Sleep(5 * time.Second)
			}
		} else {
			log.Printf("[DEBUG] GSI %s did not exist, giving up", gsiName)
			break
		}
	}

	return nil
}
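
// waitForTableToBeActive polls DescribeTable every five seconds until the
// table status is ACTIVE.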
func waitForTableToBeActive(tableName string, meta interface{}) error {
	dynamodbconn := meta.(*AWSClient).dynamodbconn
	req := &dynamodb.DescribeTableInput{
		TableName: aws.String(tableName),
	}

	activeState := false

	for !activeState {
		result, err := dynamodbconn.DescribeTable(req)

		if err != nil {
			return err
		}

		activeState = *result.Table.TableStatus == "ACTIVE"

		// Wait for a few seconds
		if !activeState {
			log.Printf("[DEBUG] Sleeping for 5 seconds for table to become active")
			time.Sleep(5 * time.Second)
		}
	}

	return nil
}