package aws

import (
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/lambda"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/mitchellh/go-homedir"
)

const awsMutexLambdaKey = `aws_lambda_function`
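
// resourceAwsLambdaFunction wires the CRUD and import functions below into
// the aws_lambda_function resource and declares its argument schema.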
func resourceAwsLambdaFunction() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsLambdaFunctionCreate,
		Read:   resourceAwsLambdaFunctionRead,
		Update: resourceAwsLambdaFunctionUpdate,
		Delete: resourceAwsLambdaFunctionDelete,

		Importer: &schema.ResourceImporter{
			State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
				d.Set("function_name", d.Id())
				return []*schema.ResourceData{d}, nil
			},
		},

		Schema: map[string]*schema.Schema{
			"filename": &schema.Schema{
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"s3_bucket", "s3_key", "s3_object_version"},
			},
			"s3_bucket": &schema.Schema{
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"filename"},
			},
			"s3_key": &schema.Schema{
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"filename"},
			},
			"s3_object_version": &schema.Schema{
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"filename"},
			},
			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},
			"function_name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"handler": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"memory_size": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Default:  128,
			},
			"role": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"runtime": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Default:  "nodejs",
			},
			"timeout": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Default:  3,
			},
			"publish": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},
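			// version and qualified_arn are filled in by Read from the newest
			// entry returned by ListVersionsByFunction.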
			"version": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"vpc_config": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"subnet_ids": &schema.Schema{
							Type:     schema.TypeSet,
							Required: true,
							ForceNew: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
							Set:      schema.HashString,
						},
						"security_group_ids": &schema.Schema{
							Type:     schema.TypeSet,
							Required: true,
							ForceNew: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
							Set:      schema.HashString,
						},
						"vpc_id": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},
					},
				},
			},
			"arn": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"qualified_arn": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"last_modified": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"source_code_hash": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
		},
	}
}

// resourceAwsLambdaFunctionCreate maps to:
// CreateFunction in the API / SDK
func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	functionName := d.Get("function_name").(string)
	iamRole := d.Get("role").(string)

	log.Printf("[DEBUG] Creating Lambda Function %s with role %s", functionName, iamRole)

	var functionCode *lambda.FunctionCode
	if v, ok := d.GetOk("filename"); ok {
		// Grab an exclusive lock so that we're only reading one function into
		// memory at a time.
		// See https://github.com/hashicorp/terraform/issues/9364
		awsMutexKV.Lock(awsMutexLambdaKey)
		defer awsMutexKV.Unlock(awsMutexLambdaKey)
		file, err := loadFileContent(v.(string))
		if err != nil {
			return fmt.Errorf("Unable to load %q: %s", v.(string), err)
		}
		functionCode = &lambda.FunctionCode{
			ZipFile: file,
		}
	} else {
		s3Bucket, bucketOk := d.GetOk("s3_bucket")
		s3Key, keyOk := d.GetOk("s3_key")
		s3ObjectVersion, versionOk := d.GetOk("s3_object_version")
		if !bucketOk || !keyOk {
			return errors.New("s3_bucket and s3_key must both be set when using an S3 code source")
		}
		functionCode = &lambda.FunctionCode{
			S3Bucket: aws.String(s3Bucket.(string)),
			S3Key:    aws.String(s3Key.(string)),
		}
		if versionOk {
			functionCode.S3ObjectVersion = aws.String(s3ObjectVersion.(string))
		}
	}

	params := &lambda.CreateFunctionInput{
		Code:         functionCode,
		Description:  aws.String(d.Get("description").(string)),
		FunctionName: aws.String(functionName),
		Handler:      aws.String(d.Get("handler").(string)),
		MemorySize:   aws.Int64(int64(d.Get("memory_size").(int))),
		Role:         aws.String(iamRole),
		Runtime:      aws.String(d.Get("runtime").(string)),
		Timeout:      aws.Int64(int64(d.Get("timeout").(int))),
		Publish:      aws.Bool(d.Get("publish").(bool)),
	}

	if v, ok := d.GetOk("vpc_config"); ok {
		config, err := validateVPCConfig(v)
		if err != nil {
			return err
		}

		if config != nil {
			var subnetIds []*string
			for _, id := range config["subnet_ids"].(*schema.Set).List() {
				subnetIds = append(subnetIds, aws.String(id.(string)))
			}

			var securityGroupIds []*string
			for _, id := range config["security_group_ids"].(*schema.Set).List() {
				securityGroupIds = append(securityGroupIds, aws.String(id.(string)))
			}

			params.VpcConfig = &lambda.VpcConfig{
				SubnetIds:        subnetIds,
				SecurityGroupIds: securityGroupIds,
			}
		}
	}

	// IAM profiles can take ~10 seconds to propagate in AWS:
	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
	// Error creating Lambda function: InvalidParameterValueException: The role defined for the task cannot be assumed by Lambda.
	err := resource.Retry(10*time.Minute, func() *resource.RetryError {
		_, err := conn.CreateFunction(params)
		if err != nil {
			log.Printf("[ERROR] Received %q, retrying CreateFunction", err)
			if awserr, ok := err.(awserr.Error); ok {
				if awserr.Code() == "InvalidParameterValueException" {
					log.Printf("[DEBUG] InvalidParameterValueException creating Lambda Function: %s", awserr)
					return resource.RetryableError(awserr)
				}
			}
			log.Printf("[DEBUG] Error creating Lambda Function: %s", err)
			return resource.NonRetryableError(err)
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error creating Lambda function: %s", err)
	}
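
	// The function name doubles as the resource ID; the importer above
	// relies on this when it seeds function_name from the ID.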
	d.SetId(d.Get("function_name").(string))

	return resourceAwsLambdaFunctionRead(d, meta)
}

// resourceAwsLambdaFunctionRead maps to:
// GetFunction in the API / SDK
func resourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	log.Printf("[DEBUG] Fetching Lambda Function: %s", d.Id())

	params := &lambda.GetFunctionInput{
		FunctionName: aws.String(d.Get("function_name").(string)),
	}

	getFunctionOutput, err := conn.GetFunction(params)
	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" && !d.IsNewResource() {
			d.SetId("")
			return nil
		}
		return err
	}

	// getFunctionOutput.Code.Location is a pre-signed URL pointing at the zip
	// file that we uploaded when we created the resource. You can use it to
	// download the code from AWS. The other part is
	// getFunctionOutput.Configuration which holds metadata.

	function := getFunctionOutput.Configuration
	// TODO error checking / handling on the Set() calls.
	d.Set("arn", function.FunctionArn)
	d.Set("description", function.Description)
	d.Set("handler", function.Handler)
	d.Set("memory_size", function.MemorySize)
	d.Set("last_modified", function.LastModified)
	d.Set("role", function.Role)
	d.Set("runtime", function.Runtime)
	d.Set("timeout", function.Timeout)
	if config := flattenLambdaVpcConfigResponse(function.VpcConfig); len(config) > 0 {
		log.Printf("[INFO] Setting Lambda %s VPC config %#v from API", d.Id(), config)
		err := d.Set("vpc_config", config)
		if err != nil {
			return fmt.Errorf("Failed setting vpc_config: %s", err)
		}
	}
	d.Set("source_code_hash", function.CodeSha256)

	// List is sorted from oldest to latest
	// so this may get costly over time :'(
	var lastVersion, lastQualifiedArn string
	err = listVersionsByFunctionPages(conn, &lambda.ListVersionsByFunctionInput{
		FunctionName: function.FunctionName,
		MaxItems:     aws.Int64(10000),
	}, func(p *lambda.ListVersionsByFunctionOutput, lastPage bool) bool {
		if lastPage {
			last := p.Versions[len(p.Versions)-1]
			lastVersion = *last.Version
			lastQualifiedArn = *last.FunctionArn
			return true
		}
		return false
	})
	if err != nil {
		return err
	}

	d.Set("version", lastVersion)
	d.Set("qualified_arn", lastQualifiedArn)

	return nil
}
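
// listVersionsByFunctionPages pages through ListVersionsByFunction results,
// invoking fn for each page until fn returns false or the last page is reached.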
func listVersionsByFunctionPages(c *lambda.Lambda, input *lambda.ListVersionsByFunctionInput,
	fn func(p *lambda.ListVersionsByFunctionOutput, lastPage bool) bool) error {
	for {
		page, err := c.ListVersionsByFunction(input)
		if err != nil {
			return err
		}
		lastPage := page.NextMarker == nil

		shouldContinue := fn(page, lastPage)
		if !shouldContinue || lastPage {
			break
		}
		input.Marker = page.NextMarker
	}
	return nil
}

// resourceAwsLambdaFunctionDelete maps to:
// DeleteFunction in the API / SDK
func resourceAwsLambdaFunctionDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	log.Printf("[INFO] Deleting Lambda Function: %s", d.Id())

	params := &lambda.DeleteFunctionInput{
		FunctionName: aws.String(d.Get("function_name").(string)),
	}

	_, err := conn.DeleteFunction(params)
	if err != nil {
		return fmt.Errorf("Error deleting Lambda Function: %s", err)
	}

	d.SetId("")

	return nil
}

// resourceAwsLambdaFunctionUpdate maps to:
// UpdateFunctionCode and UpdateFunctionConfiguration in the API / SDK
func resourceAwsLambdaFunctionUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	d.Partial(true)

	if d.HasChange("filename") || d.HasChange("source_code_hash") || d.HasChange("s3_bucket") || d.HasChange("s3_key") || d.HasChange("s3_object_version") {
		codeReq := &lambda.UpdateFunctionCodeInput{
			FunctionName: aws.String(d.Id()),
			Publish:      aws.Bool(d.Get("publish").(bool)),
		}

		if v, ok := d.GetOk("filename"); ok {
			// Grab an exclusive lock so that we're only reading one function into
			// memory at a time.
			// See https://github.com/hashicorp/terraform/issues/9364
			awsMutexKV.Lock(awsMutexLambdaKey)
			defer awsMutexKV.Unlock(awsMutexLambdaKey)
			file, err := loadFileContent(v.(string))
			if err != nil {
				return fmt.Errorf("Unable to load %q: %s", v.(string), err)
			}
			codeReq.ZipFile = file
		} else {
			s3Bucket, _ := d.GetOk("s3_bucket")
			s3Key, _ := d.GetOk("s3_key")
			s3ObjectVersion, versionOk := d.GetOk("s3_object_version")

			codeReq.S3Bucket = aws.String(s3Bucket.(string))
			codeReq.S3Key = aws.String(s3Key.(string))
			if versionOk {
				codeReq.S3ObjectVersion = aws.String(s3ObjectVersion.(string))
			}
		}

		log.Printf("[DEBUG] Send Update Lambda Function Code request: %#v", codeReq)

		_, err := conn.UpdateFunctionCode(codeReq)
		if err != nil {
			return fmt.Errorf("Error modifying Lambda Function Code %s: %s", d.Id(), err)
		}

		d.SetPartial("filename")
		d.SetPartial("source_code_hash")
		d.SetPartial("s3_bucket")
		d.SetPartial("s3_key")
		d.SetPartial("s3_object_version")
	}
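
	// Changes to description, handler, memory_size, role and timeout go
	// through a separate UpdateFunctionConfiguration call.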
	configReq := &lambda.UpdateFunctionConfigurationInput{
		FunctionName: aws.String(d.Id()),
	}

	configUpdate := false
	if d.HasChange("description") {
		configReq.Description = aws.String(d.Get("description").(string))
		configUpdate = true
	}
	if d.HasChange("handler") {
		configReq.Handler = aws.String(d.Get("handler").(string))
		configUpdate = true
	}
	if d.HasChange("memory_size") {
		configReq.MemorySize = aws.Int64(int64(d.Get("memory_size").(int)))
		configUpdate = true
	}
	if d.HasChange("role") {
		configReq.Role = aws.String(d.Get("role").(string))
		configUpdate = true
	}
	if d.HasChange("timeout") {
		configReq.Timeout = aws.Int64(int64(d.Get("timeout").(int)))
		configUpdate = true
	}

	if configUpdate {
		log.Printf("[DEBUG] Send Update Lambda Function Configuration request: %#v", configReq)
		_, err := conn.UpdateFunctionConfiguration(configReq)
		if err != nil {
			return fmt.Errorf("Error modifying Lambda Function Configuration %s: %s", d.Id(), err)
		}
		d.SetPartial("description")
		d.SetPartial("handler")
		d.SetPartial("memory_size")
		d.SetPartial("role")
		d.SetPartial("timeout")
	}
	d.Partial(false)

	return resourceAwsLambdaFunctionRead(d, meta)
}

// loadFileContent returns the contents of the file at the given path,
// expanding a leading ~ to the user's home directory.
func loadFileContent(v string) ([]byte, error) {
	filename, err := homedir.Expand(v)
	if err != nil {
		return nil, err
	}
	fileContent, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	return fileContent, nil
}
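
// validateVPCConfig ensures at most one vpc_config block was supplied and that
// subnet_ids and security_group_ids are either both set or both empty; an
// entirely empty block is treated as no VPC configuration and returns nil.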
func validateVPCConfig(v interface{}) (map[string]interface{}, error) {
	configs := v.([]interface{})
	if len(configs) > 1 {
		return nil, errors.New("Only a single vpc_config block is expected")
	}

	config, ok := configs[0].(map[string]interface{})

	if !ok {
		return nil, errors.New("vpc_config is <nil>")
	}

	// if subnet_ids and security_group_ids are both empty then the VPC is optional
	if config["subnet_ids"].(*schema.Set).Len() == 0 && config["security_group_ids"].(*schema.Set).Len() == 0 {
		return nil, nil
	}

	if config["subnet_ids"].(*schema.Set).Len() == 0 {
		return nil, errors.New("vpc_config.subnet_ids cannot be empty")
	}

	if config["security_group_ids"].(*schema.Set).Len() == 0 {
		return nil, errors.New("vpc_config.security_group_ids cannot be empty")
	}

	return config, nil
}