diff --git a/builtin/providers/google/config.go b/builtin/providers/google/config.go index dda16a032..905e56d4f 100644 --- a/builtin/providers/google/config.go +++ b/builtin/providers/google/config.go @@ -15,6 +15,7 @@ import ( "golang.org/x/oauth2/jwt" computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" + "google.golang.org/api/container/v1" "google.golang.org/api/dns/v1" "google.golang.org/api/storage/v1" ) @@ -28,6 +29,7 @@ type Config struct { clientCompute *compute.Service clientComputeBeta *computeBeta.Service + clientContainer *container.Service clientDns *dns.Service clientStorage *storage.Service } @@ -58,6 +60,7 @@ func (c *Config) loadAndValidate() error { clientScopes := []string{ "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/ndev.clouddns.readwrite", "https://www.googleapis.com/auth/devstorage.full_control", } @@ -119,6 +122,13 @@ func (c *Config) loadAndValidate() error { } c.clientComputeBeta.UserAgent = userAgent + log.Printf("[INFO] Instantiating GKE client...") + c.clientContainer, err = container.New(client) + if err != nil { + return err + } + c.clientContainer.UserAgent = userAgent + log.Printf("[INFO] Instantiating Google Cloud DNS client...") c.clientDns, err = dns.New(client) if err != nil { diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go index 1554d9154..b19d9fcea 100644 --- a/builtin/providers/google/provider.go +++ b/builtin/providers/google/provider.go @@ -39,6 +39,7 @@ func Provider() terraform.ResourceProvider { "google_compute_network": resourceComputeNetwork(), "google_compute_route": resourceComputeRoute(), "google_compute_target_pool": resourceComputeTargetPool(), + "google_container_cluster": resourceContainerCluster(), "google_dns_managed_zone": resourceDnsManagedZone(), "google_dns_record_set": resourceDnsRecordSet(), "google_storage_bucket": resourceStorageBucket(), diff --git a/builtin/providers/google/resource_container_cluster.go b/builtin/providers/google/resource_container_cluster.go new file mode 100644 index 000000000..be9573813 --- /dev/null +++ b/builtin/providers/google/resource_container_cluster.go @@ -0,0 +1,445 @@ +package google + +import ( + "fmt" + "log" + "net" + "regexp" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/container/v1" +) + +func resourceContainerCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerClusterCreate, + Read: resourceContainerClusterRead, + Update: resourceContainerClusterUpdate, + Delete: resourceContainerClusterDelete, + + Schema: map[string]*schema.Schema{ + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "node_version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "cluster_ipv4_cidr": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, ipnet, err := net.ParseCIDR(value) + + if err != nil || ipnet == nil || value != ipnet.String() { + errors = append(errors, fmt.Errorf( + "%q must contain a valid CIDR", k)) + } + return + }, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Computed: 
true, + }, + + "logging_service": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "monitoring_service": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "master_auth": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_certificate": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "client_key": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "cluster_ca_certificate": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "password": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "username": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 40 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 40 characters", k)) + } + if !regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q can only contain lowercase letters, numbers and hyphens", k)) + } + if !regexp.MustCompile("^[a-z]").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must start with a letter", k)) + } + if !regexp.MustCompile("[a-z0-9]$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must end with a number or a letter", k)) + } + return + }, + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "default", + ForceNew: true, + }, + + "node_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "disk_size_gb": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + + if value < 10 { + errors = append(errors, fmt.Errorf( + "%q cannot be less than 10", k)) + } + return + }, + }, + + "oauth_scopes": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + }, + + "initial_node_count": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "instance_group_urls": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zoneName := d.Get("zone").(string) + clusterName := d.Get("name").(string) + + masterAuths := d.Get("master_auth").([]interface{}) + if len(masterAuths) > 1 { + return fmt.Errorf("Cannot specify more than one master_auth.") + } + masterAuth := masterAuths[0].(map[string]interface{}) + + cluster := &container.Cluster{ + MasterAuth: &container.MasterAuth{ + Password: masterAuth["password"].(string), + Username: masterAuth["username"].(string), + }, + Name: clusterName, + InitialNodeCount: int64(d.Get("initial_node_count").(int)), + } + + if v, ok := 
d.GetOk("cluster_ipv4_cidr"); ok { + cluster.ClusterIpv4Cidr = v.(string) + } + + if v, ok := d.GetOk("description"); ok { + cluster.Description = v.(string) + } + + if v, ok := d.GetOk("logging_service"); ok { + cluster.LoggingService = v.(string) + } + + if v, ok := d.GetOk("monitoring_service"); ok { + cluster.MonitoringService = v.(string) + } + + if v, ok := d.GetOk("network"); ok { + cluster.Network = v.(string) + } + + if v, ok := d.GetOk("node_config"); ok { + nodeConfigs := v.([]interface{}) + if len(nodeConfigs) > 1 { + return fmt.Errorf("Cannot specify more than one node_config.") + } + nodeConfig := nodeConfigs[0].(map[string]interface{}) + + cluster.NodeConfig = &container.NodeConfig{} + + if v, ok = nodeConfig["machine_type"]; ok { + cluster.NodeConfig.MachineType = v.(string) + } + + if v, ok = nodeConfig["disk_size_gb"]; ok { + cluster.NodeConfig.DiskSizeGb = v.(int64) + } + + if v, ok := nodeConfig["oauth_scopes"]; ok { + scopesList := v.([]interface{}) + scopes := []string{} + for _, v := range scopesList { + scopes = append(scopes, v.(string)) + } + + cluster.NodeConfig.OauthScopes = scopes + } + } + + req := &container.CreateClusterRequest{ + Cluster: cluster, + } + + op, err := config.clientContainer.Projects.Zones.Clusters.Create( + config.Project, zoneName, req).Do() + if err != nil { + return err + } + + // Wait until it's created + wait := resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: "DONE", + Timeout: 30 * time.Minute, + MinTimeout: 3 * time.Second, + Refresh: func() (interface{}, string, error) { + resp, err := config.clientContainer.Projects.Zones.Operations.Get( + config.Project, zoneName, op.Name).Do() + log.Printf("[DEBUG] Progress of creating GKE cluster %s: %s", + clusterName, resp.Status) + return resp, resp.Status, err + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s has been created", clusterName) + + d.SetId(clusterName) + + return resourceContainerClusterRead(d, meta) +} + +func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zoneName := d.Get("zone").(string) + + cluster, err := config.clientContainer.Projects.Zones.Clusters.Get( + config.Project, zoneName, d.Get("name").(string)).Do() + if err != nil { + return err + } + + d.Set("name", cluster.Name) + d.Set("zone", cluster.Zone) + d.Set("endpoint", cluster.Endpoint) + + masterAuth := []map[string]interface{}{ + map[string]interface{}{ + "username": cluster.MasterAuth.Username, + "password": cluster.MasterAuth.Password, + "client_certificate": cluster.MasterAuth.ClientCertificate, + "client_key": cluster.MasterAuth.ClientKey, + "cluster_ca_certificate": cluster.MasterAuth.ClusterCaCertificate, + }, + } + d.Set("master_auth", masterAuth) + + d.Set("initial_node_count", cluster.InitialNodeCount) + d.Set("node_version", cluster.CurrentNodeVersion) + d.Set("cluster_ipv4_cidr", cluster.ClusterIpv4Cidr) + d.Set("description", cluster.Description) + d.Set("logging_service", cluster.LoggingService) + d.Set("monitoring_service", cluster.MonitoringService) + d.Set("network", cluster.Network) + d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig)) + d.Set("instance_group_urls", cluster.InstanceGroupUrls) + + return nil +} + +func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zoneName := d.Get("zone").(string) + clusterName := d.Get("name").(string) + desiredNodeVersion := 
d.Get("node_version").(string) + + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodeVersion: desiredNodeVersion, + }, + } + op, err := config.clientContainer.Projects.Zones.Clusters.Update( + config.Project, zoneName, clusterName, req).Do() + if err != nil { + return err + } + + // Wait until it's updated + wait := resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: "DONE", + Timeout: 10 * time.Minute, + MinTimeout: 2 * time.Second, + Refresh: func() (interface{}, string, error) { + log.Printf("[DEBUG] Checking if GKE cluster %s is updated", clusterName) + resp, err := config.clientContainer.Projects.Zones.Operations.Get( + config.Project, zoneName, op.Name).Do() + log.Printf("[DEBUG] Progress of updating GKE cluster %s: %s", + clusterName, resp.Status) + return resp, resp.Status, err + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(), + desiredNodeVersion) + + return resourceContainerClusterRead(d, meta) +} + +func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zoneName := d.Get("zone").(string) + clusterName := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string)) + op, err := config.clientContainer.Projects.Zones.Clusters.Delete( + config.Project, zoneName, clusterName).Do() + if err != nil { + return err + } + + // Wait until it's deleted + wait := resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: "DONE", + Timeout: 10 * time.Minute, + MinTimeout: 3 * time.Second, + Refresh: func() (interface{}, string, error) { + log.Printf("[DEBUG] Checking if GKE cluster %s is deleted", clusterName) + resp, err := config.clientContainer.Projects.Zones.Operations.Get( + config.Project, zoneName, op.Name).Do() + log.Printf("[DEBUG] Progress of deleting GKE cluster %s: %s", + clusterName, resp.Status) + return resp, resp.Status, err + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s has been deleted", d.Id()) + + d.SetId("") + + return nil +} + +func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} { + config := []map[string]interface{}{ + map[string]interface{}{ + "machine_type": c.MachineType, + "disk_size_gb": c.DiskSizeGb, + }, + } + + if len(c.OauthScopes) > 0 { + config[0]["oauth_scopes"] = c.OauthScopes + } + + return config +} diff --git a/builtin/providers/google/resource_container_cluster_test.go b/builtin/providers/google/resource_container_cluster_test.go new file mode 100644 index 000000000..daced5513 --- /dev/null +++ b/builtin/providers/google/resource_container_cluster_test.go @@ -0,0 +1,85 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccContainerCluster_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerClusterExists( + "google_container_cluster.primary"), + ), + }, + }, + }) +} + +func testAccCheckContainerClusterDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for 
_, rs := range s.RootModule().Resources {
+		if rs.Type != "google_container_cluster" {
+			continue
+		}
+
+		attributes := rs.Primary.Attributes
+		_, err := config.clientContainer.Projects.Zones.Clusters.Get(
+			config.Project, attributes["zone"], attributes["name"]).Do()
+		if err == nil {
+			return fmt.Errorf("Cluster still exists")
+		}
+	}
+
+	return nil
+}
+
+func testAccCheckContainerClusterExists(n string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No ID is set")
+		}
+
+		config := testAccProvider.Meta().(*Config)
+
+		attributes := rs.Primary.Attributes
+		found, err := config.clientContainer.Projects.Zones.Clusters.Get(
+			config.Project, attributes["zone"], attributes["name"]).Do()
+		if err != nil {
+			return err
+		}
+
+		if found.Name != attributes["name"] {
+			return fmt.Errorf("Cluster not found")
+		}
+
+		return nil
+	}
+}
+
+const testAccContainerCluster_basic = `
+resource "google_container_cluster" "primary" {
+	name = "terraform-foo-bar-test"
+	zone = "us-central1-a"
+	initial_node_count = 3
+
+	master_auth {
+		username = "mr.yoda"
+		password = "adoy.rm"
+	}
+}`
diff --git a/website/source/docs/providers/google/r/container_cluster.html.markdown b/website/source/docs/providers/google/r/container_cluster.html.markdown
new file mode 100644
index 000000000..90ec8c9f6
--- /dev/null
+++ b/website/source/docs/providers/google/r/container_cluster.html.markdown
@@ -0,0 +1,75 @@
+---
+layout: "google"
+page_title: "Google: google_container_cluster"
+sidebar_current: "docs-google-resource-container-cluster"
+description: |-
+  Creates a GKE cluster.
+---
+
+# google\_container\_cluster
+
+-> **Note:** Due to limitations of the API, all arguments except `node_version` are non-updatable. Changing any other argument forces recreation of the whole cluster.
+
+## Example Usage
+
+```
+resource "google_container_cluster" "primary" {
+	name = "marcellus-wallace"
+	zone = "us-central1-a"
+	initial_node_count = 3
+
+	master_auth {
+		username = "mr.yoda"
+		password = "adoy.rm"
+	}
+}
+```
+
+## Argument Reference
+
+* `name` - (Required) The name of the cluster, unique within the project and zone.
+* `zone` - (Required) The zone that all resources should be created in.
+* `master_auth` - (Required) The authentication information for accessing the Kubernetes master.
+* `initial_node_count` - (Required) The number of nodes to create in this cluster (not including the Kubernetes master).
+* `description` - (Optional) Description of the cluster.
+* `node_version` - (Optional) The Kubernetes version on the nodes. Only valid when upgrading an existing cluster.
+  Defaults to the latest version supported by the server.
+* `cluster_ipv4_cidr` - (Optional) The IP address range of the container pods in this cluster.
+  Default is an automatically assigned CIDR.
+* `logging_service` - (Optional) The logging service that the cluster should write logs to.
+  Available options include `logging.googleapis.com` and `none`. Defaults to `logging.googleapis.com`.
+* `monitoring_service` - (Optional) The monitoring service that the cluster should write metrics to.
+  Available options include `monitoring.googleapis.com` and `none`. Defaults to `monitoring.googleapis.com`.
+* `network` - (Optional) The name of the Google Compute Engine network to which the cluster is connected.
+* `node_config` - (Optional) The machine type and image to use for all nodes in this cluster.
+
+**Master Auth** supports the following arguments:
+
+* `password` - The password to use for HTTP basic authentication when accessing the Kubernetes master endpoint.
+* `username` - The username to use for HTTP basic authentication when accessing the Kubernetes master endpoint.
+
+**Node Config** supports the following arguments:
+
+* `machine_type` - (Optional) The name of a Google Compute Engine machine type.
+  Defaults to `n1-standard-1`.
+* `disk_size_gb` - (Optional) Size of the disk attached to each node, specified in GB.
+  The smallest allowed disk size is 10GB. Defaults to 100GB.
+* `oauth_scopes` - (Optional) The set of Google API scopes to be made available on all
+  of the node VMs under the "default" service account. The following scopes are necessary
+  to ensure the correct functioning of the cluster:
+
+  * `https://www.googleapis.com/auth/compute`
+  * `https://www.googleapis.com/auth/devstorage.read_only`
+  * `https://www.googleapis.com/auth/logging.write` (if `logging_service` points to Google)
+  * `https://www.googleapis.com/auth/monitoring` (if `monitoring_service` points to Google)
+
+## Attributes Reference
+
+* `master_auth.client_certificate` - Base64 encoded public certificate
+  used by clients to authenticate to the cluster endpoint.
+* `master_auth.client_key` - Base64 encoded private key used by clients
+  to authenticate to the cluster endpoint.
+* `master_auth.cluster_ca_certificate` - Base64 encoded public certificate
+  that is the root of trust for the cluster.
+* `endpoint` - The IP address of this cluster's Kubernetes master.
+* `instance_group_urls` - List of instance group URLs which have been assigned to the cluster.
diff --git a/website/source/layouts/google.erb b/website/source/layouts/google.erb
index 1cca724da..be706a745 100644
--- a/website/source/layouts/google.erb
+++ b/website/source/layouts/google.erb
@@ -53,6 +53,10 @@
            google_compute_target_pool

+
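
As a usage sketch only, not part of the patch: given the schema introduced above, a configuration that exercises the optional `node_config` block and reads back the computed `endpoint` might look roughly like the following. The resource label, credentials, and other values are hypothetical, and the OAuth scopes simply mirror the ones recommended in the documentation above.

```
# Illustrative configuration; labels and values are placeholders.
resource "google_container_cluster" "example" {
  name               = "example-gke-cluster"
  zone               = "us-central1-a"
  initial_node_count = 2
  description        = "Cluster managed by Terraform"
  network            = "default"

  master_auth {
    username = "admin"
    password = "change-me-please"
  }

  node_config {
    machine_type = "n1-standard-1"
    disk_size_gb = 100
    oauth_scopes = [
      "https://www.googleapis.com/auth/compute",
      "https://www.googleapis.com/auth/devstorage.read_only",
      "https://www.googleapis.com/auth/logging.write",
      "https://www.googleapis.com/auth/monitoring"
    ]
  }
}

# The master endpoint is only known after creation.
output "gke_endpoint" {
  value = "${google_container_cluster.example.endpoint}"
}
```

Because every argument except `node_version` is marked ForceNew in the schema, changing any of the values above after the first apply would destroy and recreate the cluster, which matches the note in the resource documentation.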