package aws

import (
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/lambda"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/helper/validation"
	"github.com/mitchellh/go-homedir"
)

const awsMutexLambdaKey = `aws_lambda_function`

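// resourceAwsLambdaFunction returns the schema and CRUD handlers for the
// aws_lambda_function resource. The function's code comes either from a local
// archive ("filename") or from an S3 object ("s3_bucket"/"s3_key"/
// "s3_object_version"); the two sources conflict with each other.
//
// A minimal configuration using a local archive might look like the sketch
// below (illustrative only; all names and the runtime are placeholders):
//
//	resource "aws_lambda_function" "example" {
//	  filename      = "lambda.zip"
//	  function_name = "example"
//	  role          = "${aws_iam_role.example.arn}"
//	  handler       = "index.handler"
//	  runtime       = "nodejs4.3"
//	}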
func resourceAwsLambdaFunction() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsLambdaFunctionCreate,
		Read:   resourceAwsLambdaFunctionRead,
		Update: resourceAwsLambdaFunctionUpdate,
		Delete: resourceAwsLambdaFunctionDelete,

		Importer: &schema.ResourceImporter{
			State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
				d.Set("function_name", d.Id())
				return []*schema.ResourceData{d}, nil
			},
		},

		Schema: map[string]*schema.Schema{
			"filename": {
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"s3_bucket", "s3_key", "s3_object_version"},
			},
			"s3_bucket": {
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"filename"},
			},
			"s3_key": {
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"filename"},
			},
			"s3_object_version": {
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"filename"},
			},
			"description": {
				Type:     schema.TypeString,
				Optional: true,
			},
			"dead_letter_config": {
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				MinItems: 0,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"target_arn": {
							Type:         schema.TypeString,
							Required:     true,
							ValidateFunc: validateArn,
						},
					},
				},
			},
			"function_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"handler": {
				Type:     schema.TypeString,
				Required: true,
			},
			"memory_size": {
				Type:     schema.TypeInt,
				Optional: true,
				Default:  128,
			},
			"role": {
				Type:     schema.TypeString,
				Required: true,
			},
			"runtime": {
				Type:         schema.TypeString,
				Required:     true,
				ValidateFunc: validateRuntime,
			},
			"timeout": {
				Type:     schema.TypeInt,
				Optional: true,
				Default:  3,
			},
			"publish": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},
			"version": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"vpc_config": {
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"subnet_ids": {
							Type:     schema.TypeSet,
							Required: true,
							ForceNew: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
							Set:      schema.HashString,
						},
						"security_group_ids": {
							Type:     schema.TypeSet,
							Required: true,
							ForceNew: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
							Set:      schema.HashString,
						},
						"vpc_id": {
							Type:     schema.TypeString,
							Computed: true,
						},
					},
				},
			},
			"arn": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"qualified_arn": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"invoke_arn": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"last_modified": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"source_code_hash": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"environment": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"variables": {
							Type:     schema.TypeMap,
							Optional: true,
							Elem:     schema.TypeString,
						},
					},
				},
			},
			"tracing_config": {
				Type:     schema.TypeList,
				MaxItems: 1,
				Optional: true,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"mode": {
							Type:         schema.TypeString,
							Required:     true,
							ValidateFunc: validation.StringInSlice([]string{"Active", "PassThrough"}, true),
						},
					},
				},
			},
			"kms_key_arn": {
				Type:         schema.TypeString,
				Optional:     true,
				ValidateFunc: validateArn,
			},

			"tags": tagsSchema(),
		},
	}
}

// resourceAwsLambdaFunctionCreate maps to:
// CreateFunction in the API / SDK
func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	functionName := d.Get("function_name").(string)
	iamRole := d.Get("role").(string)

	log.Printf("[DEBUG] Creating Lambda Function %s with role %s", functionName, iamRole)

	filename, hasFilename := d.GetOk("filename")
	s3Bucket, bucketOk := d.GetOk("s3_bucket")
	s3Key, keyOk := d.GetOk("s3_key")
	s3ObjectVersion, versionOk := d.GetOk("s3_object_version")

	if !hasFilename && !bucketOk && !keyOk && !versionOk {
		return errors.New("filename or s3_* attributes must be set")
	}

	var functionCode *lambda.FunctionCode
	if hasFilename {
		// Grab an exclusive lock so that we're only reading one function into
		// memory at a time.
		// See https://github.com/hashicorp/terraform/issues/9364
		awsMutexKV.Lock(awsMutexLambdaKey)
		defer awsMutexKV.Unlock(awsMutexLambdaKey)
		file, err := loadFileContent(filename.(string))
		if err != nil {
			return fmt.Errorf("Unable to load %q: %s", filename.(string), err)
		}
		functionCode = &lambda.FunctionCode{
			ZipFile: file,
		}
	} else {
		if !bucketOk || !keyOk {
			return errors.New("s3_bucket and s3_key must both be set while using S3 code source")
		}
		functionCode = &lambda.FunctionCode{
			S3Bucket: aws.String(s3Bucket.(string)),
			S3Key:    aws.String(s3Key.(string)),
		}
		if versionOk {
			functionCode.S3ObjectVersion = aws.String(s3ObjectVersion.(string))
		}
	}

	params := &lambda.CreateFunctionInput{
		Code:         functionCode,
		Description:  aws.String(d.Get("description").(string)),
		FunctionName: aws.String(functionName),
		Handler:      aws.String(d.Get("handler").(string)),
		MemorySize:   aws.Int64(int64(d.Get("memory_size").(int))),
		Role:         aws.String(iamRole),
		Runtime:      aws.String(d.Get("runtime").(string)),
		Timeout:      aws.Int64(int64(d.Get("timeout").(int))),
		Publish:      aws.Bool(d.Get("publish").(bool)),
	}

	if v, ok := d.GetOk("dead_letter_config"); ok {
		dlcMaps := v.([]interface{})
		if len(dlcMaps) == 1 { // Schema guarantees either 0 or 1
			dlcMap := dlcMaps[0].(map[string]interface{})
			params.DeadLetterConfig = &lambda.DeadLetterConfig{
				TargetArn: aws.String(dlcMap["target_arn"].(string)),
			}
		}
	}

	if v, ok := d.GetOk("vpc_config"); ok {
		config, err := validateVPCConfig(v)
		if err != nil {
			return err
		}

		if config != nil {
			var subnetIds []*string
			for _, id := range config["subnet_ids"].(*schema.Set).List() {
				subnetIds = append(subnetIds, aws.String(id.(string)))
			}

			var securityGroupIds []*string
			for _, id := range config["security_group_ids"].(*schema.Set).List() {
				securityGroupIds = append(securityGroupIds, aws.String(id.(string)))
			}

			params.VpcConfig = &lambda.VpcConfig{
				SubnetIds:        subnetIds,
				SecurityGroupIds: securityGroupIds,
			}
		}
	}

	if v, ok := d.GetOk("tracing_config"); ok {
		tracingConfig := v.([]interface{})
		tracing := tracingConfig[0].(map[string]interface{})
		params.TracingConfig = &lambda.TracingConfig{
			Mode: aws.String(tracing["mode"].(string)),
		}
	}

	if v, ok := d.GetOk("environment"); ok {
		environments := v.([]interface{})
		environment, ok := environments[0].(map[string]interface{})
		if !ok {
			return errors.New("At least one field is expected inside environment")
		}

		if environmentVariables, ok := environment["variables"]; ok {
			variables := readEnvironmentVariables(environmentVariables.(map[string]interface{}))

			params.Environment = &lambda.Environment{
				Variables: aws.StringMap(variables),
			}
		}
	}

	if v, ok := d.GetOk("kms_key_arn"); ok {
		params.KMSKeyArn = aws.String(v.(string))
	}

	if v, exists := d.GetOk("tags"); exists {
		params.Tags = tagsFromMapGeneric(v.(map[string]interface{}))
	}

	// IAM profiles can take ~10 seconds to propagate in AWS:
	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
	// Error creating Lambda function: InvalidParameterValueException: The role defined for the task cannot be assumed by Lambda.
	err := resource.Retry(10*time.Minute, func() *resource.RetryError {
		_, err := conn.CreateFunction(params)
		if err != nil {
			log.Printf("[DEBUG] Error creating Lambda Function: %s", err)

			if isAWSErr(err, "InvalidParameterValueException", "The role defined for the function cannot be assumed by Lambda") {
				log.Printf("[DEBUG] Received %s, retrying CreateFunction", err)
				return resource.RetryableError(err)
			}

			return resource.NonRetryableError(err)
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error creating Lambda function: %s", err)
	}

	d.SetId(d.Get("function_name").(string))

	return resourceAwsLambdaFunctionRead(d, meta)
}

// resourceAwsLambdaFunctionRead maps to:
// GetFunction in the API / SDK
func resourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	log.Printf("[DEBUG] Fetching Lambda Function: %s", d.Id())

	params := &lambda.GetFunctionInput{
		FunctionName: aws.String(d.Get("function_name").(string)),
	}

	getFunctionOutput, err := conn.GetFunction(params)
	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" && !d.IsNewResource() {
			d.SetId("")
			return nil
		}
		return err
	}

	// getFunctionOutput.Code.Location is a pre-signed URL pointing at the zip
	// file that we uploaded when we created the resource. You can use it to
	// download the code from AWS. The other part is
	// getFunctionOutput.Configuration which holds metadata.

	function := getFunctionOutput.Configuration
	// TODO error checking / handling on the Set() calls.
	d.Set("arn", function.FunctionArn)
	d.Set("description", function.Description)
	d.Set("handler", function.Handler)
	d.Set("memory_size", function.MemorySize)
	d.Set("last_modified", function.LastModified)
	d.Set("role", function.Role)
	d.Set("runtime", function.Runtime)
	d.Set("timeout", function.Timeout)
	d.Set("kms_key_arn", function.KMSKeyArn)
	d.Set("tags", tagsToMapGeneric(getFunctionOutput.Tags))

	config := flattenLambdaVpcConfigResponse(function.VpcConfig)
	log.Printf("[INFO] Setting Lambda %s VPC config %#v from API", d.Id(), config)
	vpcSetErr := d.Set("vpc_config", config)
	if vpcSetErr != nil {
		return fmt.Errorf("Failed setting vpc_config: %s", vpcSetErr)
	}

	d.Set("source_code_hash", function.CodeSha256)

	if err := d.Set("environment", flattenLambdaEnvironment(function.Environment)); err != nil {
		log.Printf("[ERR] Error setting environment for Lambda Function (%s): %s", d.Id(), err)
	}

	if function.DeadLetterConfig != nil && function.DeadLetterConfig.TargetArn != nil {
		d.Set("dead_letter_config", []interface{}{
			map[string]interface{}{
				"target_arn": *function.DeadLetterConfig.TargetArn,
			},
		})
	} else {
		d.Set("dead_letter_config", []interface{}{})
	}

	if function.TracingConfig != nil {
		d.Set("tracing_config", []interface{}{
			map[string]interface{}{
				"mode": *function.TracingConfig.Mode,
			},
		})
	}

	// List is sorted from oldest to latest
	// so this may get costly over time :'(
	var lastVersion, lastQualifiedArn string
	err = listVersionsByFunctionPages(conn, &lambda.ListVersionsByFunctionInput{
		FunctionName: function.FunctionName,
		MaxItems:     aws.Int64(10000),
	}, func(p *lambda.ListVersionsByFunctionOutput, lastPage bool) bool {
		if lastPage {
			last := p.Versions[len(p.Versions)-1]
			lastVersion = *last.Version
			lastQualifiedArn = *last.FunctionArn
			return false
		}
		return true
	})
	if err != nil {
		return err
	}

	d.Set("version", lastVersion)
	d.Set("qualified_arn", lastQualifiedArn)

	d.Set("invoke_arn", buildLambdaInvokeArn(*function.FunctionArn, meta.(*AWSClient).region))

	return nil
}

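// listVersionsByFunctionPages pages through the results of
// ListVersionsByFunction, calling fn for each page until fn returns false or
// the last page (NextMarker == nil) has been processed.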
func listVersionsByFunctionPages(c *lambda.Lambda, input *lambda.ListVersionsByFunctionInput,
	fn func(p *lambda.ListVersionsByFunctionOutput, lastPage bool) bool) error {
	for {
		page, err := c.ListVersionsByFunction(input)
		if err != nil {
			return err
		}
		lastPage := page.NextMarker == nil

		shouldContinue := fn(page, lastPage)
		if !shouldContinue || lastPage {
			break
		}
		input.Marker = page.NextMarker
	}
	return nil
}

// resourceAwsLambdaFunctionDelete maps to:
// DeleteFunction in the API / SDK
func resourceAwsLambdaFunctionDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	log.Printf("[INFO] Deleting Lambda Function: %s", d.Id())

	params := &lambda.DeleteFunctionInput{
		FunctionName: aws.String(d.Get("function_name").(string)),
	}

	_, err := conn.DeleteFunction(params)
	if err != nil {
		return fmt.Errorf("Error deleting Lambda Function: %s", err)
	}

	d.SetId("")

	return nil
}

// resourceAwsLambdaFunctionUpdate maps to:
// UpdateFunctionCode and UpdateFunctionConfiguration in the API / SDK
func resourceAwsLambdaFunctionUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	d.Partial(true)

	arn := d.Get("arn").(string)
	if tagErr := setTagsLambda(conn, d, arn); tagErr != nil {
		return tagErr
	}
	d.SetPartial("tags")

	if d.HasChange("filename") || d.HasChange("source_code_hash") || d.HasChange("s3_bucket") || d.HasChange("s3_key") || d.HasChange("s3_object_version") {
		codeReq := &lambda.UpdateFunctionCodeInput{
			FunctionName: aws.String(d.Id()),
			Publish:      aws.Bool(d.Get("publish").(bool)),
		}

		if v, ok := d.GetOk("filename"); ok {
			// Grab an exclusive lock so that we're only reading one function into
			// memory at a time.
			// See https://github.com/hashicorp/terraform/issues/9364
			awsMutexKV.Lock(awsMutexLambdaKey)
			defer awsMutexKV.Unlock(awsMutexLambdaKey)
			file, err := loadFileContent(v.(string))
			if err != nil {
				return fmt.Errorf("Unable to load %q: %s", v.(string), err)
			}
			codeReq.ZipFile = file
		} else {
			s3Bucket, _ := d.GetOk("s3_bucket")
			s3Key, _ := d.GetOk("s3_key")
			s3ObjectVersion, versionOk := d.GetOk("s3_object_version")

			codeReq.S3Bucket = aws.String(s3Bucket.(string))
			codeReq.S3Key = aws.String(s3Key.(string))
			if versionOk {
				codeReq.S3ObjectVersion = aws.String(s3ObjectVersion.(string))
			}
		}

		log.Printf("[DEBUG] Send Update Lambda Function Code request: %#v", codeReq)

		_, err := conn.UpdateFunctionCode(codeReq)
		if err != nil {
			return fmt.Errorf("Error modifying Lambda Function Code %s: %s", d.Id(), err)
		}

		d.SetPartial("filename")
		d.SetPartial("source_code_hash")
		d.SetPartial("s3_bucket")
		d.SetPartial("s3_key")
		d.SetPartial("s3_object_version")
	}

	configReq := &lambda.UpdateFunctionConfigurationInput{
		FunctionName: aws.String(d.Id()),
	}

	configUpdate := false
	if d.HasChange("description") {
		configReq.Description = aws.String(d.Get("description").(string))
		configUpdate = true
	}
	if d.HasChange("handler") {
		configReq.Handler = aws.String(d.Get("handler").(string))
		configUpdate = true
	}
	if d.HasChange("memory_size") {
		configReq.MemorySize = aws.Int64(int64(d.Get("memory_size").(int)))
		configUpdate = true
	}
	if d.HasChange("role") {
		configReq.Role = aws.String(d.Get("role").(string))
		configUpdate = true
	}
	if d.HasChange("timeout") {
		configReq.Timeout = aws.Int64(int64(d.Get("timeout").(int)))
		configUpdate = true
	}
	if d.HasChange("kms_key_arn") {
		configReq.KMSKeyArn = aws.String(d.Get("kms_key_arn").(string))
		configUpdate = true
	}
	if d.HasChange("dead_letter_config") {
		dlcMaps := d.Get("dead_letter_config").([]interface{})
		if len(dlcMaps) == 1 { // Schema guarantees either 0 or 1
			dlcMap := dlcMaps[0].(map[string]interface{})
			configReq.DeadLetterConfig = &lambda.DeadLetterConfig{
				TargetArn: aws.String(dlcMap["target_arn"].(string)),
			}
			configUpdate = true
		}
	}
	if d.HasChange("tracing_config") {
		tracingConfig := d.Get("tracing_config").([]interface{})
		if len(tracingConfig) == 1 { // Schema guarantees either 0 or 1
			config := tracingConfig[0].(map[string]interface{})
			configReq.TracingConfig = &lambda.TracingConfig{
				Mode: aws.String(config["mode"].(string)),
			}
			configUpdate = true
		}
	}
	if d.HasChange("runtime") {
		configReq.Runtime = aws.String(d.Get("runtime").(string))
		configUpdate = true
	}
	if d.HasChange("environment") {
		if v, ok := d.GetOk("environment"); ok {
			environments := v.([]interface{})
			environment, ok := environments[0].(map[string]interface{})
			if !ok {
				return errors.New("At least one field is expected inside environment")
			}

			if environmentVariables, ok := environment["variables"]; ok {
				variables := readEnvironmentVariables(environmentVariables.(map[string]interface{}))

				configReq.Environment = &lambda.Environment{
					Variables: aws.StringMap(variables),
				}
				configUpdate = true
			}
		} else {
			configReq.Environment = &lambda.Environment{
				Variables: aws.StringMap(map[string]string{}),
			}
			configUpdate = true
		}
	}

	if configUpdate {
		log.Printf("[DEBUG] Send Update Lambda Function Configuration request: %#v", configReq)
		_, err := conn.UpdateFunctionConfiguration(configReq)
		if err != nil {
			return fmt.Errorf("Error modifying Lambda Function Configuration %s: %s", d.Id(), err)
		}
		d.SetPartial("description")
		d.SetPartial("handler")
		d.SetPartial("memory_size")
		d.SetPartial("role")
		d.SetPartial("timeout")
	}
	d.Partial(false)

	return resourceAwsLambdaFunctionRead(d, meta)
}

// loadFileContent returns the contents of the file at the given path,
// expanding a leading ~ to the user's home directory first.
func loadFileContent(v string) ([]byte, error) {
	filename, err := homedir.Expand(v)
	if err != nil {
		return nil, err
	}
	fileContent, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	return fileContent, nil
}

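// readEnvironmentVariables converts the raw "variables" map from the
// environment block into the map[string]string expected by the Lambda API.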
func readEnvironmentVariables(ev map[string]interface{}) map[string]string {
	variables := make(map[string]string)
	for k, v := range ev {
		variables[k] = v.(string)
	}

	return variables
}

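// validateVPCConfig validates the single vpc_config block. It returns the
// block as a map when subnet_ids and security_group_ids are both set, nil
// when both are empty (the VPC attachment is simply skipped), and an error
// when only one of the two is provided.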
func validateVPCConfig(v interface{}) (map[string]interface{}, error) {
	configs := v.([]interface{})
	if len(configs) > 1 {
		return nil, errors.New("Only a single vpc_config block is expected")
	}

	config, ok := configs[0].(map[string]interface{})

	if !ok {
		return nil, errors.New("vpc_config is <nil>")
	}

	// if subnet_ids and security_group_ids are both empty then the VPC is optional
	if config["subnet_ids"].(*schema.Set).Len() == 0 && config["security_group_ids"].(*schema.Set).Len() == 0 {
		return nil, nil
	}

	if config["subnet_ids"].(*schema.Set).Len() == 0 {
		return nil, errors.New("vpc_config.subnet_ids cannot be empty")
	}

	if config["security_group_ids"].(*schema.Set).Len() == 0 {
		return nil, errors.New("vpc_config.security_group_ids cannot be empty")
	}

	return config, nil
}

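// validateRuntime rejects the legacy "nodejs" runtime, which has reached end
// of life, and points at a newer Node.js runtime instead.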
func validateRuntime(v interface{}, k string) (ws []string, errors []error) {
	runtime := v.(string)

	if runtime == lambda.RuntimeNodejs {
		errors = append(errors, fmt.Errorf(
			"%s reached end of life in October 2016 and has been deprecated in favor of %s.",
			runtime, lambda.RuntimeNodejs43))
	}
	return
}