terraform/builtin/providers/docker/resource_docker_container_f...


package docker

import (
"archive/tar"
"bytes"
"errors"
"fmt"
"strconv"
"time"
dc "github.com/fsouza/go-dockerclient"
"github.com/hashicorp/terraform/helper/schema"
)
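// creationTime records when the most recent container was started, so Read
// can wait for a freshly created container to reach a running state.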
var (
creationTime time.Time
)
func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) error {
var err error
client := meta.(*dc.Client)
var data Data
if err := fetchLocalImages(&data, client); err != nil {
return err
}
image := d.Get("image").(string)
if _, ok := data.DockerImages[image]; !ok {
if _, ok := data.DockerImages[image+":latest"]; !ok {
return fmt.Errorf("Unable to find image %s", image)
}
image = image + ":latest"
}
	// The Docker API now accepts a HostConfig in CreateContainerOptions,
	// but in testing it still only applied the HostConfig options that
	// were set at StartContainer time.
createOpts := dc.CreateContainerOptions{
Name: d.Get("name").(string),
Config: &dc.Config{
Image: image,
Hostname: d.Get("hostname").(string),
Domainname: d.Get("domainname").(string),
},
}
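	// Optional container configuration: environment variables, command,
	// entrypoint, and the user the container process runs as.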
if v, ok := d.GetOk("env"); ok {
createOpts.Config.Env = stringSetToStringSlice(v.(*schema.Set))
}
if v, ok := d.GetOk("command"); ok {
createOpts.Config.Cmd = stringListToStringSlice(v.([]interface{}))
for _, v := range createOpts.Config.Cmd {
if v == "" {
return fmt.Errorf("values for command may not be empty")
}
}
}
if v, ok := d.GetOk("entrypoint"); ok {
createOpts.Config.Entrypoint = stringListToStringSlice(v.([]interface{}))
}
if v, ok := d.GetOk("user"); ok {
createOpts.Config.User = v.(string)
}
exposedPorts := map[dc.Port]struct{}{}
portBindings := map[dc.Port][]dc.PortBinding{}
if v, ok := d.GetOk("ports"); ok {
exposedPorts, portBindings = portSetToDockerPorts(v.(*schema.Set))
}
if len(exposedPorts) != 0 {
createOpts.Config.ExposedPorts = exposedPorts
}
extraHosts := []string{}
if v, ok := d.GetOk("host"); ok {
extraHosts = extraHostsSetToDockerExtraHosts(v.(*schema.Set))
}
volumes := map[string]struct{}{}
binds := []string{}
volumesFrom := []string{}
if v, ok := d.GetOk("volumes"); ok {
volumes, binds, volumesFrom, err = volumeSetToDockerVolumes(v.(*schema.Set))
if err != nil {
return fmt.Errorf("Unable to parse volumes: %s", err)
}
}
if len(volumes) != 0 {
createOpts.Config.Volumes = volumes
}
if v, ok := d.GetOk("labels"); ok {
createOpts.Config.Labels = mapTypeMapValsToString(v.(map[string]interface{}))
}
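	// hostConfig holds runtime settings (privileges, port publishing, restart
	// policy, logging, resources) that are applied when the container is run.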
hostConfig := &dc.HostConfig{
Privileged: d.Get("privileged").(bool),
PublishAllPorts: d.Get("publish_all_ports").(bool),
RestartPolicy: dc.RestartPolicy{
Name: d.Get("restart").(string),
MaximumRetryCount: d.Get("max_retry_count").(int),
},
LogConfig: dc.LogConfig{
Type: d.Get("log_driver").(string),
},
}
if len(portBindings) != 0 {
hostConfig.PortBindings = portBindings
}
if len(extraHosts) != 0 {
hostConfig.ExtraHosts = extraHosts
}
if len(binds) != 0 {
hostConfig.Binds = binds
}
if len(volumesFrom) != 0 {
hostConfig.VolumesFrom = volumesFrom
}
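	// Only the first capabilities block is used; the loop breaks after
	// reading one element of the set.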
if v, ok := d.GetOk("capabilities"); ok {
for _, capInt := range v.(*schema.Set).List() {
capa := capInt.(map[string]interface{})
hostConfig.CapAdd = stringSetToStringSlice(capa["add"].(*schema.Set))
hostConfig.CapDrop = stringSetToStringSlice(capa["drop"].(*schema.Set))
break
}
}
if v, ok := d.GetOk("dns"); ok {
hostConfig.DNS = stringSetToStringSlice(v.(*schema.Set))
}
if v, ok := d.GetOk("dns_opts"); ok {
hostConfig.DNSOptions = stringSetToStringSlice(v.(*schema.Set))
}
if v, ok := d.GetOk("dns_search"); ok {
hostConfig.DNSSearch = stringSetToStringSlice(v.(*schema.Set))
}
if v, ok := d.GetOk("links"); ok {
hostConfig.Links = stringSetToStringSlice(v.(*schema.Set))
}
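	// memory is specified in megabytes in the schema and converted to bytes
	// for the Docker API.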
if v, ok := d.GetOk("memory"); ok {
hostConfig.Memory = int64(v.(int)) * 1024 * 1024
}
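	// Positive memory_swap values are likewise converted from megabytes to
	// bytes; non-positive values (such as -1 for unlimited swap) are passed
	// through unchanged.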
if v, ok := d.GetOk("memory_swap"); ok {
swap := int64(v.(int))
if swap > 0 {
swap = swap * 1024 * 1024
}
hostConfig.MemorySwap = swap
}
if v, ok := d.GetOk("cpu_shares"); ok {
hostConfig.CPUShares = int64(v.(int))
}
if v, ok := d.GetOk("log_opts"); ok {
hostConfig.LogConfig.Config = mapTypeMapValsToString(v.(map[string]interface{}))
}
if v, ok := d.GetOk("network_mode"); ok {
hostConfig.NetworkMode = v.(string)
}
createOpts.HostConfig = hostConfig
var retContainer *dc.Container
if retContainer, err = client.CreateContainer(createOpts); err != nil {
return fmt.Errorf("Unable to create container: %s", err)
}
if retContainer == nil {
return fmt.Errorf("Returned container is nil")
}
d.SetId(retContainer.ID)
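	// Connect the new container to any requested user-defined networks
	// before it is started.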
if v, ok := d.GetOk("networks"); ok {
connectionOpts := dc.NetworkConnectionOptions{Container: retContainer.ID}
for _, rawNetwork := range v.(*schema.Set).List() {
network := rawNetwork.(string)
if err := client.ConnectNetwork(network, connectionOpts); err != nil {
return fmt.Errorf("Unable to connect to network '%s': %s", network, err)
}
}
}
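	// Each "upload" block is packed into an in-memory tar archive holding a
	// single file, then copied into the container via the archive upload API.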
if v, ok := d.GetOk("upload"); ok {
for _, upload := range v.(*schema.Set).List() {
content := upload.(map[string]interface{})["content"].(string)
file := upload.(map[string]interface{})["file"].(string)
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
hdr := &tar.Header{
Name: file,
Mode: 0644,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return fmt.Errorf("Error creating tar archive: %s", err)
}
if _, err := tw.Write([]byte(content)); err != nil {
return fmt.Errorf("Error creating tar archive: %s", err)
}
if err := tw.Close(); err != nil {
return fmt.Errorf("Error creating tar archive: %s", err)
}
uploadOpts := dc.UploadToContainerOptions{
InputStream: bytes.NewReader(buf.Bytes()),
Path: "/",
}
if err := client.UploadToContainer(retContainer.ID, uploadOpts); err != nil {
return fmt.Errorf("Unable to upload volume content: %s", err)
}
}
}
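	// Record when the container was started so Read can tell an immediate
	// exit apart from a container that was already stopped.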
creationTime = time.Now()
if err := client.StartContainer(retContainer.ID, nil); err != nil {
return fmt.Errorf("Unable to start container: %s", err)
}
return resourceDockerContainerRead(d, meta)
}
func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*dc.Client)
apiContainer, err := fetchDockerContainer(d.Id(), client)
if err != nil {
return err
}
if apiContainer == nil {
// This container doesn't exist anymore
d.SetId("")
return nil
}
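	// Poll the container state. A container created in this run is given up
	// to 30 inspections at 500ms intervals (15 seconds) to reach a running
	// state; otherwise only a single inspection is made.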
var container *dc.Container
loops := 1 // if it hasn't just been created, don't delay
if !creationTime.IsZero() {
loops = 30 // with 500ms spacing, 15 seconds; ought to be plenty
}
sleepTime := 500 * time.Millisecond
for i := loops; i > 0; i-- {
container, err = client.InspectContainer(apiContainer.ID)
if err != nil {
return fmt.Errorf("Error inspecting container %s: %s", apiContainer.ID, err)
}
if container.State.Running ||
!container.State.Running && !d.Get("must_run").(bool) {
break
}
if creationTime.IsZero() { // We didn't just create it, so don't wait around
return resourceDockerContainerDelete(d, meta)
}
if container.State.FinishedAt.After(creationTime) {
// It exited immediately, so error out so dependent containers
// aren't started
resourceDockerContainerDelete(d, meta)
return fmt.Errorf("Container %s exited after creation, error was: %s", apiContainer.ID, container.State.Error)
}
time.Sleep(sleepTime)
}
// Handle the case of the for loop above running its course
if !container.State.Running && d.Get("must_run").(bool) {
resourceDockerContainerDelete(d, meta)
return fmt.Errorf("Container %s failed to be in running state", apiContainer.ID)
}
// Read Network Settings
if container.NetworkSettings != nil {
d.Set("ip_address", container.NetworkSettings.IPAddress)
d.Set("ip_prefix_length", container.NetworkSettings.IPPrefixLen)
d.Set("gateway", container.NetworkSettings.Gateway)
d.Set("bridge", container.NetworkSettings.Bridge)
}
return nil
}
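// Update is a no-op; this resource does not support in-place modification.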
func resourceDockerContainerUpdate(d *schema.ResourceData, meta interface{}) error {
return nil
}
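// Delete stops the container first when destroy_grace_seconds is set, then
// force-removes it together with its anonymous volumes.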
func resourceDockerContainerDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*dc.Client)
// Stop the container before removing if destroy_grace_seconds is defined
if d.Get("destroy_grace_seconds").(int) > 0 {
var timeout = uint(d.Get("destroy_grace_seconds").(int))
if err := client.StopContainer(d.Id(), timeout); err != nil {
return fmt.Errorf("Error stopping container %s: %s", d.Id(), err)
}
}
removeOpts := dc.RemoveContainerOptions{
ID: d.Id(),
RemoveVolumes: true,
Force: true,
}
if err := client.RemoveContainer(removeOpts); err != nil {
return fmt.Errorf("Error deleting container %s: %s", d.Id(), err)
}
d.SetId("")
return nil
}
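// The helpers below convert Terraform schema values (lists, sets, and maps)
// into the plain Go types expected by go-dockerclient.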
func stringListToStringSlice(stringList []interface{}) []string {
ret := []string{}
for _, v := range stringList {
if v == nil {
ret = append(ret, "")
continue
}
ret = append(ret, v.(string))
}
return ret
}
func stringSetToStringSlice(stringSet *schema.Set) []string {
ret := []string{}
if stringSet == nil {
return ret
}
for _, envVal := range stringSet.List() {
ret = append(ret, envVal.(string))
}
return ret
}
func mapTypeMapValsToString(typeMap map[string]interface{}) map[string]string {
mapped := make(map[string]string, len(typeMap))
for k, v := range typeMap {
mapped[k] = v.(string)
}
return mapped
}
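// fetchDockerContainer looks up a container by ID among all containers,
// running or stopped. It returns nil with no error when there is no match.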
func fetchDockerContainer(ID string, client *dc.Client) (*dc.APIContainers, error) {
apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true})
if err != nil {
return nil, fmt.Errorf("Error fetching container information from Docker: %s\n", err)
}
for _, apiContainer := range apiContainers {
if apiContainer.ID == ID {
return &apiContainer, nil
}
}
return nil, nil
}
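// portSetToDockerPorts converts the "ports" set into the exposed-port map
// used in the container Config and the host port bindings used in HostConfig.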
func portSetToDockerPorts(ports *schema.Set) (map[dc.Port]struct{}, map[dc.Port][]dc.PortBinding) {
retExposedPorts := map[dc.Port]struct{}{}
retPortBindings := map[dc.Port][]dc.PortBinding{}
for _, portInt := range ports.List() {
port := portInt.(map[string]interface{})
internal := port["internal"].(int)
protocol := port["protocol"].(string)
exposedPort := dc.Port(strconv.Itoa(internal) + "/" + protocol)
retExposedPorts[exposedPort] = struct{}{}
external, extOk := port["external"].(int)
ip, ipOk := port["ip"].(string)
if extOk {
portBinding := dc.PortBinding{
HostPort: strconv.Itoa(external),
}
if ipOk {
portBinding.HostIP = ip
}
retPortBindings[exposedPort] = append(retPortBindings[exposedPort], portBinding)
}
}
return retExposedPorts, retPortBindings
}
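// extraHostsSetToDockerExtraHosts renders each "host" block as a
// "hostname:ip" entry, the format Docker expects for extra hosts entries.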
func extraHostsSetToDockerExtraHosts(extraHosts *schema.Set) []string {
retExtraHosts := []string{}
for _, hostInt := range extraHosts.List() {
host := hostInt.(map[string]interface{})
ip := host["ip"].(string)
hostname := host["host"].(string)
retExtraHosts = append(retExtraHosts, hostname+":"+ip)
}
return retExtraHosts
}
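// volumeSetToDockerVolumes splits the "volumes" set into anonymous container
// volumes, host/named-volume bind strings, and volumes-from container names.
// An entry must name either a source container or a container path, not both.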
func volumeSetToDockerVolumes(volumes *schema.Set) (map[string]struct{}, []string, []string, error) {
retVolumeMap := map[string]struct{}{}
retHostConfigBinds := []string{}
retVolumeFromContainers := []string{}
for _, volumeInt := range volumes.List() {
volume := volumeInt.(map[string]interface{})
fromContainer := volume["from_container"].(string)
containerPath := volume["container_path"].(string)
volumeName := volume["volume_name"].(string)
if len(volumeName) == 0 {
volumeName = volume["host_path"].(string)
}
readOnly := volume["read_only"].(bool)
switch {
case len(fromContainer) == 0 && len(containerPath) == 0:
return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Volume entry without container path or source container")
case len(fromContainer) != 0 && len(containerPath) != 0:
return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Both a container and a path specified in a volume entry")
case len(fromContainer) != 0:
retVolumeFromContainers = append(retVolumeFromContainers, fromContainer)
case len(volumeName) != 0:
readWrite := "rw"
if readOnly {
readWrite = "ro"
}
retVolumeMap[containerPath] = struct{}{}
retHostConfigBinds = append(retHostConfigBinds, volumeName+":"+containerPath+":"+readWrite)
default:
retVolumeMap[containerPath] = struct{}{}
}
}
return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, nil
}