As discussed on the issue, remove the hard-coded delay on startup in favor of attempting to detect whether the initial container ever enters the running state, and erroring out if not. It will re-check the container once every 500ms for 15 seconds total; future work could make that configurable.
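The new Read logic is essentially a poll-with-timeout loop: inspect the container, stop as soon as it is running (or is allowed not to run), and otherwise sleep and retry until the attempts run out. Below is a minimal, self-contained sketch of that general pattern; the waitFor helper and the fake check are illustrative stand-ins, not code from this commit.

// Sketch of the poll-until-running pattern described above: re-check a
// condition on a fixed interval until it holds or the retry budget runs out.
// waitFor and the fake check are hypothetical names, not provider code.
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor calls check up to attempts times, sleeping interval between tries,
// and returns nil as soon as check reports success.
func waitFor(check func() (bool, error), interval time.Duration, attempts int) error {
	for i := 0; i < attempts; i++ {
		ok, err := check()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("condition not met before the retry budget ran out")
}

func main() {
	start := time.Now()

	// Stand-in for InspectContainer + State.Running: pretend the container
	// reaches the running state about two seconds after "creation".
	check := func() (bool, error) {
		return time.Since(start) > 2*time.Second, nil
	}

	// 30 attempts at 500ms spacing is roughly 15 seconds, as in the commit.
	err := waitFor(check, 500*time.Millisecond, 30)
	fmt.Println("wait result:", err)
}

Making the interval and attempt count parameters of such a helper is what would let the 15-second budget become configurable later, as the commit message suggests.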
Jeff Mitchell 2015-06-25 14:38:56 +00:00
parent 56cfba2509
commit edbc578316
1 changed file with 41 additions and 18 deletions


@@ -11,6 +11,10 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
var (
creationTime time.Time
)
func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) error {
var err error
client := meta.(*dc.Client)
@@ -20,15 +24,12 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
return err
}
delayStart := false
image := d.Get("image").(string)
if _, ok := data.DockerImages[image]; !ok {
if _, ok := data.DockerImages[image+":latest"]; !ok {
return fmt.Errorf("Unable to find image %s", image)
} else {
image = image + ":latest"
}
image = image + ":latest"
}
// The awesome, wonderful, splendiferous, sensical
@@ -109,15 +110,9 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
if v, ok := d.GetOk("links"); ok {
hostConfig.Links = stringSetToStringSlice(v.(*schema.Set))
delayStart = true
}
// For instance, Docker will fail to start containers with links
// to other containers if the containers haven't started yet
if delayStart {
time.Sleep(3 * time.Second)
}
creationTime = time.Now()
if err := client.StartContainer(retContainer.ID, hostConfig); err != nil {
return fmt.Errorf("Unable to start container: %s", err)
}
@@ -132,21 +127,49 @@ func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error
if err != nil {
return err
}
if apiContainer == nil {
// This container doesn't exist anymore
d.SetId("")
return nil
}
container, err := client.InspectContainer(apiContainer.ID)
if err != nil {
return fmt.Errorf("Error inspecting container %s: %s", apiContainer.ID, err)
var container *dc.Container
loops := 1 // if it hasn't just been created, don't delay
if !creationTime.IsZero() {
loops = 30 // with 500ms spacing, 15 seconds; ought to be plenty
}
sleepTime := 500 * time.Millisecond
for i := loops; i > 0; i-- {
container, err = client.InspectContainer(apiContainer.ID)
if err != nil {
return fmt.Errorf("Error inspecting container %s: %s", apiContainer.ID, err)
}
if container.State.Running ||
(!container.State.Running && !d.Get("must_run").(bool)) {
break
}
if creationTime.IsZero() { // We didn't just create it, so don't wait around
return resourceDockerContainerDelete(d, meta)
}
if container.State.FinishedAt.After(creationTime) {
// It exited immediately, so error out so dependent containers
// aren't started
resourceDockerContainerDelete(d, meta)
return fmt.Errorf("Container %s exited after creation, error was: %s", apiContainer.ID, container.State.Error)
}
time.Sleep(sleepTime)
}
if d.Get("must_run").(bool) && !container.State.Running {
return resourceDockerContainerDelete(d, meta)
// Handle the case of the for loop above running its course
if !container.State.Running && d.Get("must_run").(bool) {
resourceDockerContainerDelete(d, meta)
return fmt.Errorf("Container %s failed to be in running state", apiContainer.ID)
}
// Read Network Settings