package aws

import (
    "bytes"
    "fmt"
    "log"
    "sort"
    "strconv"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/service/ec2"
    "github.com/hashicorp/terraform/helper/hashcode"
    "github.com/hashicorp/terraform/helper/resource"
    "github.com/hashicorp/terraform/helper/schema"
)

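// resourceAwsSecurityGroup returns the schema and CRUD functions for the
// aws_security_group resource.
//
// Illustrative configuration (a minimal sketch; the attribute values are
// hypothetical and not taken from this file):
//
//     resource "aws_security_group" "example" {
//       name        = "example"
//       description = "Managed by Terraform"
//       vpc_id      = "${aws_vpc.main.id}"
//
//       ingress {
//         from_port   = 443
//         to_port     = 443
//         protocol    = "tcp"
//         cidr_blocks = ["0.0.0.0/0"]
//       }
//     }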
func resourceAwsSecurityGroup() *schema.Resource {
    return &schema.Resource{
        Create: resourceAwsSecurityGroupCreate,
        Read:   resourceAwsSecurityGroupRead,
        Update: resourceAwsSecurityGroupUpdate,
        Delete: resourceAwsSecurityGroupDelete,
        Importer: &schema.ResourceImporter{
            State: resourceAwsSecurityGroupImportState,
        },

        Schema: map[string]*schema.Schema{
            "name": &schema.Schema{
                Type:          schema.TypeString,
                Optional:      true,
                Computed:      true,
                ForceNew:      true,
                ConflictsWith: []string{"name_prefix"},
                ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
                    value := v.(string)
                    if len(value) > 255 {
                        errors = append(errors, fmt.Errorf(
                            "%q cannot be longer than 255 characters", k))
                    }
                    return
                },
            },

            "name_prefix": &schema.Schema{
                Type:     schema.TypeString,
                Optional: true,
                ForceNew: true,
                ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
                    value := v.(string)
                    if len(value) > 100 {
                        errors = append(errors, fmt.Errorf(
                            "%q cannot be longer than 100 characters, name is limited to 255", k))
                    }
                    return
                },
            },

            "description": &schema.Schema{
                Type:     schema.TypeString,
                Optional: true,
                ForceNew: true,
                Default:  "Managed by Terraform",
                ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
                    value := v.(string)
                    if len(value) > 255 {
                        errors = append(errors, fmt.Errorf(
                            "%q cannot be longer than 255 characters", k))
                    }
                    return
                },
            },

            "vpc_id": &schema.Schema{
                Type:     schema.TypeString,
                Optional: true,
                ForceNew: true,
                Computed: true,
            },

            "ingress": &schema.Schema{
                Type:     schema.TypeSet,
                Optional: true,
                Computed: true,
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "from_port": &schema.Schema{
                            Type:     schema.TypeInt,
                            Required: true,
                        },

                        "to_port": &schema.Schema{
                            Type:     schema.TypeInt,
                            Required: true,
                        },

                        "protocol": &schema.Schema{
                            Type:      schema.TypeString,
                            Required:  true,
                            StateFunc: protocolStateFunc,
                        },

                        "cidr_blocks": &schema.Schema{
                            Type:     schema.TypeList,
                            Optional: true,
                            Elem:     &schema.Schema{Type: schema.TypeString},
                        },

                        "security_groups": &schema.Schema{
                            Type:     schema.TypeSet,
                            Optional: true,
                            Elem:     &schema.Schema{Type: schema.TypeString},
                            Set:      schema.HashString,
                        },

                        "self": &schema.Schema{
                            Type:     schema.TypeBool,
                            Optional: true,
                            Default:  false,
                        },
                    },
                },
                Set: resourceAwsSecurityGroupRuleHash,
            },

            "egress": &schema.Schema{
                Type:     schema.TypeSet,
                Optional: true,
                Computed: true,
                Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                        "from_port": &schema.Schema{
                            Type:     schema.TypeInt,
                            Required: true,
                        },

                        "to_port": &schema.Schema{
                            Type:     schema.TypeInt,
                            Required: true,
                        },

                        "protocol": &schema.Schema{
                            Type:      schema.TypeString,
                            Required:  true,
                            StateFunc: protocolStateFunc,
                        },

                        "cidr_blocks": &schema.Schema{
                            Type:     schema.TypeList,
                            Optional: true,
                            Elem:     &schema.Schema{Type: schema.TypeString},
                        },

                        "prefix_list_ids": &schema.Schema{
                            Type:     schema.TypeList,
                            Optional: true,
                            Elem:     &schema.Schema{Type: schema.TypeString},
                        },

                        "security_groups": &schema.Schema{
                            Type:     schema.TypeSet,
                            Optional: true,
                            Elem:     &schema.Schema{Type: schema.TypeString},
                            Set:      schema.HashString,
                        },

                        "self": &schema.Schema{
                            Type:     schema.TypeBool,
                            Optional: true,
                            Default:  false,
                        },
                    },
                },
                Set: resourceAwsSecurityGroupRuleHash,
            },

            "owner_id": &schema.Schema{
                Type:     schema.TypeString,
                Computed: true,
            },

            "tags": tagsSchema(),
        },
    }
}

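// resourceAwsSecurityGroupCreate creates the security group, waits for it to
// exist, and (for VPC groups) revokes the default allow-all egress rule before
// delegating rule management to resourceAwsSecurityGroupUpdate.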
func resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {
    conn := meta.(*AWSClient).ec2conn

    securityGroupOpts := &ec2.CreateSecurityGroupInput{}

    if v, ok := d.GetOk("vpc_id"); ok {
        securityGroupOpts.VpcId = aws.String(v.(string))
    }

    if v := d.Get("description"); v != nil {
        securityGroupOpts.Description = aws.String(v.(string))
    }

    var groupName string
    if v, ok := d.GetOk("name"); ok {
        groupName = v.(string)
    } else if v, ok := d.GetOk("name_prefix"); ok {
        groupName = resource.PrefixedUniqueId(v.(string))
    } else {
        groupName = resource.UniqueId()
    }
    securityGroupOpts.GroupName = aws.String(groupName)

    var err error
    log.Printf(
        "[DEBUG] Security Group create configuration: %#v", securityGroupOpts)
    createResp, err := conn.CreateSecurityGroup(securityGroupOpts)
    if err != nil {
        return fmt.Errorf("Error creating Security Group: %s", err)
    }

    d.SetId(*createResp.GroupId)

    log.Printf("[INFO] Security Group ID: %s", d.Id())

    // Wait for the security group to truly exist
    log.Printf(
        "[DEBUG] Waiting for Security Group (%s) to exist",
        d.Id())
    stateConf := &resource.StateChangeConf{
        Pending: []string{""},
        Target:  []string{"exists"},
        Refresh: SGStateRefreshFunc(conn, d.Id()),
        Timeout: 1 * time.Minute,
    }

    resp, err := stateConf.WaitForState()
    if err != nil {
        return fmt.Errorf(
            "Error waiting for Security Group (%s) to become available: %s",
            d.Id(), err)
    }

    // AWS defaults all Security Groups to have an ALLOW ALL egress rule. Here we
    // revoke that rule, so users don't unknowingly have/use it.
    group := resp.(*ec2.SecurityGroup)
    if group.VpcId != nil && *group.VpcId != "" {
        log.Printf("[DEBUG] Revoking default egress rule for Security Group for %s", d.Id())

        req := &ec2.RevokeSecurityGroupEgressInput{
            GroupId: createResp.GroupId,
            IpPermissions: []*ec2.IpPermission{
                &ec2.IpPermission{
                    FromPort: aws.Int64(int64(0)),
                    ToPort:   aws.Int64(int64(0)),
                    IpRanges: []*ec2.IpRange{
                        &ec2.IpRange{
                            CidrIp: aws.String("0.0.0.0/0"),
                        },
                    },
                    IpProtocol: aws.String("-1"),
                },
            },
        }

        if _, err = conn.RevokeSecurityGroupEgress(req); err != nil {
            return fmt.Errorf(
                "Error revoking default egress rule for Security Group (%s): %s",
                d.Id(), err)
        }

    }

    return resourceAwsSecurityGroupUpdate(d, meta)
}

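// resourceAwsSecurityGroupRead refreshes the security group from the EC2 API
// and reconciles the remote ingress/egress rules with the rules held in state
// via matchRules.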
func resourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {
    conn := meta.(*AWSClient).ec2conn

    sgRaw, _, err := SGStateRefreshFunc(conn, d.Id())()
    if err != nil {
        return err
    }
    if sgRaw == nil {
        d.SetId("")
        return nil
    }

    sg := sgRaw.(*ec2.SecurityGroup)

    remoteIngressRules := resourceAwsSecurityGroupIPPermGather(d.Id(), sg.IpPermissions, sg.OwnerId)
    remoteEgressRules := resourceAwsSecurityGroupIPPermGather(d.Id(), sg.IpPermissionsEgress, sg.OwnerId)

    localIngressRules := d.Get("ingress").(*schema.Set).List()
    localEgressRules := d.Get("egress").(*schema.Set).List()

    // Loop through the local state of rules, doing a match against the remote
    // ruleSet we built above.
    ingressRules := matchRules("ingress", localIngressRules, remoteIngressRules)
    egressRules := matchRules("egress", localEgressRules, remoteEgressRules)

    d.Set("description", sg.Description)
    d.Set("name", sg.GroupName)
    d.Set("vpc_id", sg.VpcId)
    d.Set("owner_id", sg.OwnerId)

    if err := d.Set("ingress", ingressRules); err != nil {
        log.Printf("[WARN] Error setting Ingress rule set for (%s): %s", d.Id(), err)
    }

    if err := d.Set("egress", egressRules); err != nil {
        log.Printf("[WARN] Error setting Egress rule set for (%s): %s", d.Id(), err)
    }

    d.Set("tags", tagsToMap(sg.Tags))
    return nil
}

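// resourceAwsSecurityGroupUpdate applies ingress (and, for VPC groups, egress)
// rule changes and tag changes, then re-reads the resource.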
func resourceAwsSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error {
    conn := meta.(*AWSClient).ec2conn

    sgRaw, _, err := SGStateRefreshFunc(conn, d.Id())()
    if err != nil {
        return err
    }
    if sgRaw == nil {
        d.SetId("")
        return nil
    }

    group := sgRaw.(*ec2.SecurityGroup)

    err = resourceAwsSecurityGroupUpdateRules(d, "ingress", meta, group)
    if err != nil {
        return err
    }

    if d.Get("vpc_id") != nil {
        err = resourceAwsSecurityGroupUpdateRules(d, "egress", meta, group)
        if err != nil {
            return err
        }
    }

    if err := setTags(conn, d); err != nil {
        return err
    }

    d.SetPartial("tags")

    return resourceAwsSecurityGroupRead(d, meta)
}

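// resourceAwsSecurityGroupDelete deletes the security group, retrying for up
// to five minutes while dependent resources still reference it.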
func resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {
    conn := meta.(*AWSClient).ec2conn

    log.Printf("[DEBUG] Security Group destroy: %v", d.Id())

    return resource.Retry(5*time.Minute, func() *resource.RetryError {
        _, err := conn.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{
            GroupId: aws.String(d.Id()),
        })
        if err != nil {
            ec2err, ok := err.(awserr.Error)
            if !ok {
                return resource.RetryableError(err)
            }

            switch ec2err.Code() {
            case "InvalidGroup.NotFound":
                return nil
            case "DependencyViolation":
                // If it is a dependency violation, we want to retry
                return resource.RetryableError(err)
            default:
                // Any other error, we want to quit the retry loop immediately
                return resource.NonRetryableError(err)
            }
        }

        return nil
    })
}

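// resourceAwsSecurityGroupRuleHash builds the hash used as the set key for
// ingress and egress rule blocks. The ports, normalized protocol, self flag,
// and the sorted cidr_blocks, prefix_list_ids, and security_groups values are
// written into a buffer and hashed, so equivalent rules always hash alike.
// As an illustration (values hypothetical), a TCP rule for port 443 from
// 10.0.0.0/16 would hash the string "443-443-tcp-false-10.0.0.0/16-".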
func resourceAwsSecurityGroupRuleHash(v interface{}) int {
    var buf bytes.Buffer
    m := v.(map[string]interface{})
    buf.WriteString(fmt.Sprintf("%d-", m["from_port"].(int)))
    buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int)))
    p := protocolForValue(m["protocol"].(string))
    buf.WriteString(fmt.Sprintf("%s-", p))
    buf.WriteString(fmt.Sprintf("%t-", m["self"].(bool)))

    // We need to make sure to sort the strings below so that we always
    // generate the same hash code no matter what is in the set.
    if v, ok := m["cidr_blocks"]; ok {
        vs := v.([]interface{})
        s := make([]string, len(vs))
        for i, raw := range vs {
            s[i] = raw.(string)
        }
        sort.Strings(s)

        for _, v := range s {
            buf.WriteString(fmt.Sprintf("%s-", v))
        }
    }
    if v, ok := m["prefix_list_ids"]; ok {
        vs := v.([]interface{})
        s := make([]string, len(vs))
        for i, raw := range vs {
            s[i] = raw.(string)
        }
        sort.Strings(s)

        for _, v := range s {
            buf.WriteString(fmt.Sprintf("%s-", v))
        }
    }
    if v, ok := m["security_groups"]; ok {
        vs := v.(*schema.Set).List()
        s := make([]string, len(vs))
        for i, raw := range vs {
            s[i] = raw.(string)
        }
        sort.Strings(s)

        for _, v := range s {
            buf.WriteString(fmt.Sprintf("%s-", v))
        }
    }

    return hashcode.String(buf.String())
}

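// resourceAwsSecurityGroupIPPermGather flattens the EC2 IP permissions for a
// group into rule maps keyed by protocol and port range, in the same shape the
// ingress/egress schema uses (cidr_blocks, prefix_list_ids, security_groups,
// self).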
func resourceAwsSecurityGroupIPPermGather(groupId string, permissions []*ec2.IpPermission, ownerId *string) []map[string]interface{} {
    ruleMap := make(map[string]map[string]interface{})
    for _, perm := range permissions {
        var fromPort, toPort int64
        if v := perm.FromPort; v != nil {
            fromPort = *v
        }
        if v := perm.ToPort; v != nil {
            toPort = *v
        }

        k := fmt.Sprintf("%s-%d-%d", *perm.IpProtocol, fromPort, toPort)
        m, ok := ruleMap[k]
        if !ok {
            m = make(map[string]interface{})
            ruleMap[k] = m
        }

        m["from_port"] = fromPort
        m["to_port"] = toPort
        m["protocol"] = *perm.IpProtocol

        if len(perm.IpRanges) > 0 {
            raw, ok := m["cidr_blocks"]
            if !ok {
                raw = make([]string, 0, len(perm.IpRanges))
            }
            list := raw.([]string)

            for _, ip := range perm.IpRanges {
                list = append(list, *ip.CidrIp)
            }

            m["cidr_blocks"] = list
        }

        if len(perm.PrefixListIds) > 0 {
            raw, ok := m["prefix_list_ids"]
            if !ok {
                raw = make([]string, 0, len(perm.PrefixListIds))
            }
            list := raw.([]string)

            for _, pl := range perm.PrefixListIds {
                list = append(list, *pl.PrefixListId)
            }

            m["prefix_list_ids"] = list
        }

        groups := flattenSecurityGroups(perm.UserIdGroupPairs, ownerId)
        for i, g := range groups {
            if *g.GroupId == groupId {
                groups[i], groups = groups[len(groups)-1], groups[:len(groups)-1]
                m["self"] = true
            }
        }

        if len(groups) > 0 {
            raw, ok := m["security_groups"]
            if !ok {
                raw = schema.NewSet(schema.HashString, nil)
            }
            list := raw.(*schema.Set)

            for _, g := range groups {
                if g.GroupName != nil {
                    list.Add(*g.GroupName)
                } else {
                    list.Add(*g.GroupId)
                }
            }

            m["security_groups"] = list
        }
    }
    rules := make([]map[string]interface{}, 0, len(ruleMap))
    for _, m := range ruleMap {
        rules = append(rules, m)
    }

    return rules
}

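// resourceAwsSecurityGroupUpdateRules diffs the old and new rule sets for the
// given ruleset ("ingress" or "egress") and revokes the removed permissions
// before authorizing the added ones.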
func resourceAwsSecurityGroupUpdateRules(
    d *schema.ResourceData, ruleset string,
    meta interface{}, group *ec2.SecurityGroup) error {

    if d.HasChange(ruleset) {
        o, n := d.GetChange(ruleset)
        if o == nil {
            o = new(schema.Set)
        }
        if n == nil {
            n = new(schema.Set)
        }

        os := o.(*schema.Set)
        ns := n.(*schema.Set)

        remove, err := expandIPPerms(group, os.Difference(ns).List())
        if err != nil {
            return err
        }
        add, err := expandIPPerms(group, ns.Difference(os).List())
        if err != nil {
            return err
        }

        // TODO: We need to handle partial state better in the in-between
        // in this update.

        // TODO: It'd be nicer to authorize before removing, but then we have
        // to deal with complicated unrolling to get individual CIDR blocks
        // to avoid authorizing already authorized sources. Removing before
        // adding is easier here, and Terraform should be fast enough to
        // not have service issues.

        if len(remove) > 0 || len(add) > 0 {
            conn := meta.(*AWSClient).ec2conn

            var err error
            if len(remove) > 0 {
                log.Printf("[DEBUG] Revoking security group %#v %s rule: %#v",
                    group, ruleset, remove)

                if ruleset == "egress" {
                    req := &ec2.RevokeSecurityGroupEgressInput{
                        GroupId:       group.GroupId,
                        IpPermissions: remove,
                    }
                    _, err = conn.RevokeSecurityGroupEgress(req)
                } else {
                    req := &ec2.RevokeSecurityGroupIngressInput{
                        GroupId:       group.GroupId,
                        IpPermissions: remove,
                    }
                    if group.VpcId == nil || *group.VpcId == "" {
                        req.GroupId = nil
                        req.GroupName = group.GroupName
                    }
                    _, err = conn.RevokeSecurityGroupIngress(req)
                }

                if err != nil {
                    return fmt.Errorf(
                        "Error revoking security group %s rules: %s",
                        ruleset, err)
                }
            }

            if len(add) > 0 {
                log.Printf("[DEBUG] Authorizing security group %#v %s rule: %#v",
                    group, ruleset, add)
                // Authorize the new rules
                if ruleset == "egress" {
                    req := &ec2.AuthorizeSecurityGroupEgressInput{
                        GroupId:       group.GroupId,
                        IpPermissions: add,
                    }
                    _, err = conn.AuthorizeSecurityGroupEgress(req)
                } else {
                    req := &ec2.AuthorizeSecurityGroupIngressInput{
                        GroupId:       group.GroupId,
                        IpPermissions: add,
                    }
                    if group.VpcId == nil || *group.VpcId == "" {
                        req.GroupId = nil
                        req.GroupName = group.GroupName
                    }

                    _, err = conn.AuthorizeSecurityGroupIngress(req)
                }

                if err != nil {
                    return fmt.Errorf(
                        "Error authorizing security group %s rules: %s",
                        ruleset, err)
                }
            }
        }
    }
    return nil
}

// SGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
// a security group.
func SGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {
    return func() (interface{}, string, error) {
        req := &ec2.DescribeSecurityGroupsInput{
            GroupIds: []*string{aws.String(id)},
        }
        resp, err := conn.DescribeSecurityGroups(req)
        if err != nil {
            if ec2err, ok := err.(awserr.Error); ok {
                if ec2err.Code() == "InvalidSecurityGroupID.NotFound" ||
                    ec2err.Code() == "InvalidGroup.NotFound" {
                    resp = nil
                    err = nil
                }
            }

            if err != nil {
                log.Printf("Error on SGStateRefresh: %s", err)
                return nil, "", err
            }
        }

        if resp == nil {
            return nil, "", nil
        }

        group := resp.SecurityGroups[0]
        return group, "exists", nil
    }
}

// matchRules receives the rule type ("ingress" or "egress") along with the
// local and remote rule maps. We iterate through the local set of rules trying
// to find a matching remote rule, which may be structured differently because
// of how AWS aggregates the rules under the to/from ports and protocol.
//
// Matching rules are written to state, with their elements removed from the
// remote set.
//
// If no match is found, we'll write the remote rule to state and let the graph
// sort things out.
func matchRules(rType string, local []interface{}, remote []map[string]interface{}) []map[string]interface{} {
    // For each local ip or security_group, we need to match against the remote
    // ruleSet until all ips or security_groups are found

    // saves represents the rules that have been identified to be saved to state,
    // in the appropriate d.Set("{ingress,egress}") call.
    var saves []map[string]interface{}
    for _, raw := range local {
        l := raw.(map[string]interface{})

        var selfVal bool
        if v, ok := l["self"]; ok {
            selfVal = v.(bool)
        }

        // matching against self is required to detect rules that only include self
        // as the rule. resourceAwsSecurityGroupIPPermGather parses the group out
        // and replaces it with self if its ID is found
        localHash := idHash(rType, l["protocol"].(string), int64(l["to_port"].(int)), int64(l["from_port"].(int)), selfVal)

        // loop remote rules, looking for a matching hash
        for _, r := range remote {
            var remoteSelfVal bool
            if v, ok := r["self"]; ok {
                remoteSelfVal = v.(bool)
            }

            // hash this remote rule and compare it for a match consideration with the
            // local rule we're examining
            rHash := idHash(rType, r["protocol"].(string), r["to_port"].(int64), r["from_port"].(int64), remoteSelfVal)
            if rHash == localHash {
                var numExpectedCidrs, numExpectedPrefixLists, numExpectedSGs, numRemoteCidrs, numRemotePrefixLists, numRemoteSGs int
                var matchingCidrs []string
                var matchingSGs []string
                var matchingPrefixLists []string

                // grab the local/remote cidr and sg groups, capturing the expected and
                // actual counts
                lcRaw, ok := l["cidr_blocks"]
                if ok {
                    numExpectedCidrs = len(l["cidr_blocks"].([]interface{}))
                }
                lpRaw, ok := l["prefix_list_ids"]
                if ok {
                    numExpectedPrefixLists = len(l["prefix_list_ids"].([]interface{}))
                }
                lsRaw, ok := l["security_groups"]
                if ok {
                    numExpectedSGs = len(l["security_groups"].(*schema.Set).List())
                }

                rcRaw, ok := r["cidr_blocks"]
                if ok {
                    numRemoteCidrs = len(r["cidr_blocks"].([]string))
                }
                rpRaw, ok := r["prefix_list_ids"]
                if ok {
                    numRemotePrefixLists = len(r["prefix_list_ids"].([]string))
                }

                rsRaw, ok := r["security_groups"]
                if ok {
                    numRemoteSGs = len(r["security_groups"].(*schema.Set).List())
                }

                // check some early failures
                if numExpectedCidrs > numRemoteCidrs {
                    log.Printf("[DEBUG] Local rule has more CIDR blocks, continuing (%d/%d)", numExpectedCidrs, numRemoteCidrs)
                    continue
                }
                if numExpectedPrefixLists > numRemotePrefixLists {
                    log.Printf("[DEBUG] Local rule has more prefix lists, continuing (%d/%d)", numExpectedPrefixLists, numRemotePrefixLists)
                    continue
                }
                if numExpectedSGs > numRemoteSGs {
                    log.Printf("[DEBUG] Local rule has more Security Groups, continuing (%d/%d)", numExpectedSGs, numRemoteSGs)
                    continue
                }

                // match CIDRs by converting both to sets, and using Set methods
                var localCidrs []interface{}
                if lcRaw != nil {
                    localCidrs = lcRaw.([]interface{})
                }
                localCidrSet := schema.NewSet(schema.HashString, localCidrs)

                // remote cidrs are presented as a slice of strings, so we need to
                // reformat them into a slice of interfaces to be used in creating the
                // remote cidr set
                var remoteCidrs []string
                if rcRaw != nil {
                    remoteCidrs = rcRaw.([]string)
                }
                // convert remote cidrs to a set, for easy comparisons
                var list []interface{}
                for _, s := range remoteCidrs {
                    list = append(list, s)
                }
                remoteCidrSet := schema.NewSet(schema.HashString, list)

                // Build up a list of local cidrs that are found in the remote set
                for _, s := range localCidrSet.List() {
                    if remoteCidrSet.Contains(s) {
                        matchingCidrs = append(matchingCidrs, s.(string))
                    }
                }

                // match prefix lists by converting both to sets, and using Set methods
                var localPrefixLists []interface{}
                if lpRaw != nil {
                    localPrefixLists = lpRaw.([]interface{})
                }
                localPrefixListsSet := schema.NewSet(schema.HashString, localPrefixLists)

                // remote prefix lists are presented as a slice of strings, so we need to
                // reformat them into a slice of interfaces to be used in creating the
                // remote prefix list set
                var remotePrefixLists []string
                if rpRaw != nil {
                    remotePrefixLists = rpRaw.([]string)
                }
                // convert remote prefix lists to a set, for easy comparison
                list = nil
                for _, s := range remotePrefixLists {
                    list = append(list, s)
                }
                remotePrefixListsSet := schema.NewSet(schema.HashString, list)

                // Build up a list of local prefix lists that are found in the remote set
                for _, s := range localPrefixListsSet.List() {
                    if remotePrefixListsSet.Contains(s) {
                        matchingPrefixLists = append(matchingPrefixLists, s.(string))
                    }
                }

                // match SGs. Both local and remote are already sets
                var localSGSet *schema.Set
                if lsRaw == nil {
                    localSGSet = schema.NewSet(schema.HashString, nil)
                } else {
                    localSGSet = lsRaw.(*schema.Set)
                }

                var remoteSGSet *schema.Set
                if rsRaw == nil {
                    remoteSGSet = schema.NewSet(schema.HashString, nil)
                } else {
                    remoteSGSet = rsRaw.(*schema.Set)
                }

                // Build up a list of local security groups that are found in the remote set
                for _, s := range localSGSet.List() {
                    if remoteSGSet.Contains(s) {
                        matchingSGs = append(matchingSGs, s.(string))
                    }
                }

                // compare equalities for matches.
                // If we found the number of cidrs and number of sgs, we declare a
                // match, and then remove those elements from the remote rule, so that
                // this remote rule can still be considered by other local rules
                if numExpectedCidrs == len(matchingCidrs) {
                    if numExpectedPrefixLists == len(matchingPrefixLists) {
                        if numExpectedSGs == len(matchingSGs) {
                            // confirm that self references match
                            var lSelf bool
                            var rSelf bool
                            if _, ok := l["self"]; ok {
                                lSelf = l["self"].(bool)
                            }
                            if _, ok := r["self"]; ok {
                                rSelf = r["self"].(bool)
                            }
                            if rSelf == lSelf {
                                delete(r, "self")
                                // pop local cidrs from remote
                                diffCidr := remoteCidrSet.Difference(localCidrSet)
                                var newCidr []string
                                for _, cRaw := range diffCidr.List() {
                                    newCidr = append(newCidr, cRaw.(string))
                                }

                                // reassigning
                                if len(newCidr) > 0 {
                                    r["cidr_blocks"] = newCidr
                                } else {
                                    delete(r, "cidr_blocks")
                                }

                                // pop local prefix lists from remote
                                diffPrefixLists := remotePrefixListsSet.Difference(localPrefixListsSet)
                                var newPrefixLists []string
                                for _, pRaw := range diffPrefixLists.List() {
                                    newPrefixLists = append(newPrefixLists, pRaw.(string))
                                }

                                // reassigning
                                if len(newPrefixLists) > 0 {
                                    r["prefix_list_ids"] = newPrefixLists
                                } else {
                                    delete(r, "prefix_list_ids")
                                }

                                // pop local sgs from remote
                                diffSGs := remoteSGSet.Difference(localSGSet)
                                if len(diffSGs.List()) > 0 {
                                    r["security_groups"] = diffSGs
                                } else {
                                    delete(r, "security_groups")
                                }

                                saves = append(saves, l)
                            }
                        }
                    }
                }
            }
        }
    }

    // Here we catch any remote rules that have not been stripped of all self,
    // cidrs, and security groups. We'll add remote rules here that have not been
    // matched locally, and let the graph sort things out. This will happen when
    // rules are added externally to Terraform
    for _, r := range remote {
        var lenCidr, lenPrefixLists, lenSGs int
        if rCidrs, ok := r["cidr_blocks"]; ok {
            lenCidr = len(rCidrs.([]string))
        }
        if rPrefixLists, ok := r["prefix_list_ids"]; ok {
            lenPrefixLists = len(rPrefixLists.([]string))
        }
        if rawSGs, ok := r["security_groups"]; ok {
            lenSGs = len(rawSGs.(*schema.Set).List())
        }

        if _, ok := r["self"]; ok {
            if r["self"].(bool) == true {
                lenSGs++
            }
        }

        if lenSGs+lenCidr+lenPrefixLists > 0 {
            log.Printf("[DEBUG] Found a remote Rule that wasn't empty: (%#v)", r)
            saves = append(saves, r)
        }
    }

    return saves
}

// idHash creates a unique hash for the type, ports, and protocol, used as a
// key in maps.
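// For example (hypothetical values), idHash("ingress", "tcp", 443, 443, false)
// hashes the string "ingress-443-443-tcp-false-" into a key like "rule-12345".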
func idHash(rType, protocol string, toPort, fromPort int64, self bool) string {
    var buf bytes.Buffer
    buf.WriteString(fmt.Sprintf("%s-", rType))
    buf.WriteString(fmt.Sprintf("%d-", toPort))
    buf.WriteString(fmt.Sprintf("%d-", fromPort))
    buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(protocol)))
    buf.WriteString(fmt.Sprintf("%t-", self))

    return fmt.Sprintf("rule-%d", hashcode.String(buf.String()))
}

// protocolStateFunc ensures we only store a string in any protocol field
func protocolStateFunc(v interface{}) string {
    switch v.(type) {
    case string:
        p := protocolForValue(v.(string))
        return p
    default:
        log.Printf("[WARN] Non String value given for Protocol: %#v", v)
        return ""
    }
}

// protocolForValue converts a valid Internet Protocol number into its name
// representation. If a name is given, it validates that it's a proper protocol
// name. Names/numbers are as defined at
// https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
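// For example, protocolForValue("6") is expected to return "tcp" (assuming
// protocolIntegers maps "tcp" to 6), and protocolForValue("TCP") returns "tcp".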
func protocolForValue(v string) string {
    // special case -1
    protocol := strings.ToLower(v)
    if protocol == "-1" || protocol == "all" {
        return "-1"
    }
    // if it's a name like tcp, return that
    if _, ok := protocolIntegers()[protocol]; ok {
        return protocol
    }
    // convert to int, look for that value
    p, err := strconv.Atoi(protocol)
    if err != nil {
        // we were unable to convert to int, suggesting a string name, but it wasn't
        // found above
        log.Printf("[WARN] Unable to determine valid protocol: %s", err)
        return protocol
    }

    for k, v := range protocolIntegers() {
        if p == v {
            // guard against protocolIntegers sometime in the future not having lower
            // case ids in the map
            return strings.ToLower(k)
        }
    }

    // fall through
    log.Printf("[WARN] Unable to determine valid protocol: no matching protocols found")
    return protocol
}