package azurerm

import (
	"bytes"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"io"
	"log"
	"os"
	"runtime"
	"strings"
	"sync"

	"github.com/Azure/azure-sdk-for-go/storage"
	"github.com/hashicorp/terraform/helper/schema"
)
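
// resourceArmStorageBlob defines the azurerm_storage_blob resource: a blob in
// an Azure Storage container that can be created empty, uploaded from a local
// file ("source"), or copied from an existing URI ("source_uri").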
func resourceArmStorageBlob() *schema.Resource {
	return &schema.Resource{
		Create: resourceArmStorageBlobCreate,
		Read:   resourceArmStorageBlobRead,
		Exists: resourceArmStorageBlobExists,
		Delete: resourceArmStorageBlobDelete,

		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"resource_group_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"storage_account_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"storage_container_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"type": {
				Type:         schema.TypeString,
				Optional:     true,
				ForceNew:     true,
				ValidateFunc: validateArmStorageBlobType,
			},
			"size": {
				Type:         schema.TypeInt,
				Optional:     true,
				ForceNew:     true,
				Default:      0,
				ValidateFunc: validateArmStorageBlobSize,
			},
			"source": {
				Type:          schema.TypeString,
				Optional:      true,
				ForceNew:      true,
				ConflictsWith: []string{"source_uri"},
			},
			"source_uri": {
				Type:          schema.TypeString,
				Optional:      true,
				ForceNew:      true,
				ConflictsWith: []string{"source"},
			},
			"url": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"parallelism": {
				Type:         schema.TypeInt,
				Optional:     true,
				Default:      8,
				ForceNew:     true,
				ValidateFunc: validateArmStorageBlobParallelism,
			},
			"attempts": {
				Type:         schema.TypeInt,
				Optional:     true,
				Default:      1,
				ForceNew:     true,
				ValidateFunc: validateArmStorageBlobAttempts,
			},
		},
	}
}
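
// validateArmStorageBlobParallelism ensures the number of upload workers per
// CPU is a positive integer.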
func validateArmStorageBlobParallelism(v interface{}, k string) (ws []string, errors []error) {
	value := v.(int)

	if value <= 0 {
		errors = append(errors, fmt.Errorf("Blob Parallelism %d is invalid, must be greater than 0", value))
	}

	return
}
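
// validateArmStorageBlobAttempts ensures the per-chunk retry count is a
// positive integer.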
func validateArmStorageBlobAttempts(v interface{}, k string) (ws []string, errors []error) {
	value := v.(int)

	if value <= 0 {
		errors = append(errors, fmt.Errorf("Blob Attempts %d is invalid, must be greater than 0", value))
	}

	return
}
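
// validateArmStorageBlobSize ensures a page blob size is a multiple of 512
// bytes, as the Azure page blob API requires.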
func validateArmStorageBlobSize(v interface{}, k string) (ws []string, errors []error) {
	value := v.(int)

	if value%512 != 0 {
		errors = append(errors, fmt.Errorf("Blob Size %d is invalid, must be a multiple of 512", value))
	}

	return
}
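
// validateArmStorageBlobType restricts the blob type to "block" or "page",
// case-insensitively.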
func validateArmStorageBlobType(v interface{}, k string) (ws []string, errors []error) {
	value := strings.ToLower(v.(string))
	validTypes := map[string]struct{}{
		"block": {},
		"page":  {},
	}

	if _, ok := validTypes[value]; !ok {
		errors = append(errors, fmt.Errorf("Blob type %q is invalid, must be %q or %q", value, "block", "page"))
	}
	return
}
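
// resourceArmStorageBlobCreate creates the blob: via a server-side copy when
// "source_uri" is set, otherwise as a block or page blob, optionally uploading
// the contents of a local "source" file.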
func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) error {
	armClient := meta.(*ArmClient)

	resourceGroupName := d.Get("resource_group_name").(string)
	storageAccountName := d.Get("storage_account_name").(string)

	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
	if err != nil {
		return err
	}
	if !accountExists {
		return fmt.Errorf("Storage Account %q Not Found", storageAccountName)
	}

	name := d.Get("name").(string)
	blobType := d.Get("type").(string)
	cont := d.Get("storage_container_name").(string)
	sourceUri := d.Get("source_uri").(string)

	log.Printf("[INFO] Creating blob %q in storage account %q", name, storageAccountName)
	if sourceUri != "" {
		// A source URI takes precedence: ask Azure to copy the blob server-side.
		if err := blobClient.CopyBlob(cont, name, sourceUri); err != nil {
			return fmt.Errorf("Error creating storage blob on Azure: %s", err)
		}
	} else {
		switch strings.ToLower(blobType) {
		case "block":
			if err := blobClient.CreateBlockBlob(cont, name); err != nil {
				return fmt.Errorf("Error creating storage blob on Azure: %s", err)
			}

			source := d.Get("source").(string)
			if source != "" {
				parallelism := d.Get("parallelism").(int)
				attempts := d.Get("attempts").(int)
				if err := resourceArmStorageBlobBlockUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil {
					return fmt.Errorf("Error creating storage blob on Azure: %s", err)
				}
			}
		case "page":
			source := d.Get("source").(string)
			if source != "" {
				parallelism := d.Get("parallelism").(int)
				attempts := d.Get("attempts").(int)
				if err := resourceArmStorageBlobPageUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil {
					return fmt.Errorf("Error creating storage blob on Azure: %s", err)
				}
			} else {
				// No source file: create an empty page blob of the requested size.
				size := int64(d.Get("size").(int))
				if err := blobClient.PutPageBlob(cont, name, size, map[string]string{}); err != nil {
					return fmt.Errorf("Error creating storage blob on Azure: %s", err)
				}
			}
		}
	}

	d.SetId(name)
	return resourceArmStorageBlobRead(d, meta)
}
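
// resourceArmStorageBlobPage is a unit of work for the page upload workers: a
// reader over one non-empty section of the source file and its offset within
// the blob.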
type resourceArmStorageBlobPage struct {
	offset  int64
	section *io.SectionReader
}
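
// resourceArmStorageBlobPageUploadFromSource uploads a local file to a page
// blob. The file is split into non-empty page ranges, which are fed through a
// channel to parallelism*NumCPU worker goroutines; if any worker reported an
// error, one of them is returned after all workers finish.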
func resourceArmStorageBlobPageUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error {
	workerCount := parallelism * runtime.NumCPU()

	file, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("Error opening source file for upload %q: %s", source, err)
	}
	defer file.Close()

	blobSize, pageList, err := resourceArmStorageBlobPageSplit(file)
	if err != nil {
		return fmt.Errorf("Error splitting source file %q into pages: %s", source, err)
	}

	if err := client.PutPageBlob(container, name, blobSize, map[string]string{}); err != nil {
		return fmt.Errorf("Error creating storage blob on Azure: %s", err)
	}

	pages := make(chan resourceArmStorageBlobPage, len(pageList))
	errors := make(chan error, len(pageList))
	wg := &sync.WaitGroup{}
	wg.Add(len(pageList))

	total := int64(0)
	for _, page := range pageList {
		total += page.section.Size()
		pages <- page
	}
	close(pages)

	for i := 0; i < workerCount; i++ {
		go resourceArmStorageBlobPageUploadWorker(resourceArmStorageBlobPageUploadContext{
			container: container,
			name:      name,
			source:    source,
			blobSize:  blobSize,
			client:    client,
			pages:     pages,
			errors:    errors,
			wg:        wg,
			attempts:  attempts,
		})
	}

	wg.Wait()

	if len(errors) > 0 {
		return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors)
	}

	return nil
}
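
// resourceArmStorageBlobPageSplit scans the file in 4KiB pages and coalesces
// consecutive non-empty pages into ranges of at most 4MiB, so that all-zero
// regions of sparse files are never uploaded. It returns the page-aligned
// blob size together with the ranges.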
func resourceArmStorageBlobPageSplit(file *os.File) (int64, []resourceArmStorageBlobPage, error) {
	const (
		minPageSize int64 = 4 * 1024
		maxPageSize int64 = 4 * 1024 * 1024
	)

	info, err := file.Stat()
	if err != nil {
		return int64(0), nil, fmt.Errorf("Could not stat file %q: %s", file.Name(), err)
	}

	// Page blobs must have a size that is a multiple of the page size, so
	// round the declared blob size up to the next page boundary.
	blobSize := info.Size()
	if info.Size()%minPageSize != 0 {
		blobSize = info.Size() + (minPageSize - (info.Size() % minPageSize))
	}

	emptyPage := make([]byte, minPageSize)

	type byteRange struct {
		offset int64
		length int64
	}

	var nonEmptyRanges []byteRange
	var currentRange byteRange
	for i := int64(0); i < blobSize; i += minPageSize {
		pageBuf := make([]byte, minPageSize)
		_, err = file.ReadAt(pageBuf, i)
		if err != nil && err != io.EOF {
			return int64(0), nil, fmt.Errorf("Could not read chunk at %d: %s", i, err)
		}

		if bytes.Equal(pageBuf, emptyPage) {
			// All-zero pages are skipped: close the range in progress (if
			// any) and start the next candidate range after this page.
			if currentRange.length != 0 {
				nonEmptyRanges = append(nonEmptyRanges, currentRange)
			}
			currentRange = byteRange{
				offset: i + minPageSize,
			}
		} else {
			currentRange.length += minPageSize
			if currentRange.length == maxPageSize || (currentRange.offset+currentRange.length == blobSize) {
				nonEmptyRanges = append(nonEmptyRanges, currentRange)
				currentRange = byteRange{
					offset: i + minPageSize,
				}
			}
		}
	}

	var pages []resourceArmStorageBlobPage
	for _, nonEmptyRange := range nonEmptyRanges {
		pages = append(pages, resourceArmStorageBlobPage{
			offset:  nonEmptyRange.offset,
			section: io.NewSectionReader(file, nonEmptyRange.offset, nonEmptyRange.length),
		})
	}

	// Return the padded size rather than the raw file size, so the blob and
	// the final PutPage range stay 512-byte aligned as the API requires.
	return blobSize, pages, nil
}
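
// resourceArmStorageBlobPageUploadContext carries the shared state handed to
// each page upload worker goroutine.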
type resourceArmStorageBlobPageUploadContext struct {
	container string
	name      string
	source    string
	blobSize  int64
	client    *storage.BlobStorageClient
	pages     chan resourceArmStorageBlobPage
	errors    chan error
	wg        *sync.WaitGroup
	attempts  int
}
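
// resourceArmStorageBlobPageUploadWorker drains the pages channel, writing
// each range with PutPage and retrying up to the configured number of
// attempts before reporting the last error.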
func resourceArmStorageBlobPageUploadWorker(ctx resourceArmStorageBlobPageUploadContext) {
	for page := range ctx.pages {
		start := page.offset
		end := page.offset + page.section.Size() - 1
		// The last section may run past the end of the blob; clamp the range.
		if end > ctx.blobSize-1 {
			end = ctx.blobSize - 1
		}
		size := end - start + 1

		chunk := make([]byte, size)
		_, err := page.section.Read(chunk)
		if err != nil && err != io.EOF {
			ctx.errors <- fmt.Errorf("Error reading source file %q at offset %d: %s", ctx.source, page.offset, err)
			ctx.wg.Done()
			continue
		}

		// Retry the page write up to the configured number of attempts,
		// keeping only the last error.
		for x := 0; x < ctx.attempts; x++ {
			err = ctx.client.PutPage(ctx.container, ctx.name, start, end, storage.PageWriteTypeUpdate, chunk, map[string]string{})
			if err == nil {
				break
			}
		}
		if err != nil {
			ctx.errors <- fmt.Errorf("Error writing page at offset %d for file %q: %s", page.offset, ctx.source, err)
			ctx.wg.Done()
			continue
		}

		ctx.wg.Done()
	}
}
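
// resourceArmStorageBlobBlock is a unit of work for the block upload workers:
// a reader over one section of the source file and the block ID it will be
// uploaded under.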
type resourceArmStorageBlobBlock struct {
	section *io.SectionReader
	id      string
}
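
// resourceArmStorageBlobBlockUploadFromSource uploads a local file to a block
// blob: the file is split into blocks, which are uploaded by
// parallelism*NumCPU worker goroutines and then committed with PutBlockList.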
func resourceArmStorageBlobBlockUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error {
	workerCount := parallelism * runtime.NumCPU()

	file, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("Error opening source file for upload %q: %s", source, err)
	}
	defer file.Close()

	blockList, parts, err := resourceArmStorageBlobBlockSplit(file)
	if err != nil {
		return fmt.Errorf("Error reading and splitting source file for upload %q: %s", source, err)
	}

	wg := &sync.WaitGroup{}
	blocks := make(chan resourceArmStorageBlobBlock, len(parts))
	errors := make(chan error, len(parts))

	wg.Add(len(parts))
	for _, p := range parts {
		blocks <- p
	}
	close(blocks)

	for i := 0; i < workerCount; i++ {
		go resourceArmStorageBlobBlockUploadWorker(resourceArmStorageBlobBlockUploadContext{
			client:    client,
			source:    source,
			container: container,
			name:      name,
			blocks:    blocks,
			errors:    errors,
			wg:        wg,
			attempts:  attempts,
		})
	}

	wg.Wait()

	if len(errors) > 0 {
		return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors)
	}

	err = client.PutBlockList(container, name, blockList)
	if err != nil {
		return fmt.Errorf("Error updating block list for source file %q: %s", source, err)
	}

	return nil
}
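
// resourceArmStorageBlobBlockSplit cuts the file into 4MiB sections, pairing
// each with a random base64-encoded block ID, and returns both the
// uncommitted block list and the matching upload work items.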
func resourceArmStorageBlobBlockSplit(file *os.File) ([]storage.Block, []resourceArmStorageBlobBlock, error) {
	const (
		idSize          = 64
		blockSize int64 = 4 * 1024 * 1024
	)
	var parts []resourceArmStorageBlobBlock
	var blockList []storage.Block

	info, err := file.Stat()
	if err != nil {
		return nil, nil, fmt.Errorf("Could not stat source file %q: %s", file.Name(), err)
	}

	for i := int64(0); i < info.Size(); i += blockSize {
		// Block IDs must be unique within the blob, so use base64-encoded
		// random bytes of a fixed size.
		entropy := make([]byte, idSize)
		_, err = rand.Read(entropy)
		if err != nil {
			return nil, nil, fmt.Errorf("Error generating a random block ID for source file %q: %s", file.Name(), err)
		}

		sectionSize := blockSize
		remainder := info.Size() - i
		if remainder < blockSize {
			sectionSize = remainder
		}

		block := storage.Block{
			ID:     base64.StdEncoding.EncodeToString(entropy),
			Status: storage.BlockStatusUncommitted,
		}

		blockList = append(blockList, block)

		parts = append(parts, resourceArmStorageBlobBlock{
			id:      block.ID,
			section: io.NewSectionReader(file, i, sectionSize),
		})
	}

	return blockList, parts, nil
}
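
// resourceArmStorageBlobBlockUploadContext carries the shared state handed to
// each block upload worker goroutine.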
type resourceArmStorageBlobBlockUploadContext struct {
	client    *storage.BlobStorageClient
	container string
	name      string
	source    string
	attempts  int
	blocks    chan resourceArmStorageBlobBlock
	errors    chan error
	wg        *sync.WaitGroup
}
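
// resourceArmStorageBlobBlockUploadWorker drains the blocks channel, uploading
// each block with PutBlock and retrying up to the configured number of
// attempts before reporting the last error.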
func resourceArmStorageBlobBlockUploadWorker(ctx resourceArmStorageBlobBlockUploadContext) {
	for block := range ctx.blocks {
		buffer := make([]byte, block.section.Size())

		_, err := block.section.Read(buffer)
		if err != nil {
			ctx.errors <- fmt.Errorf("Error reading source file %q: %s", ctx.source, err)
			ctx.wg.Done()
			continue
		}

		for i := 0; i < ctx.attempts; i++ {
			err = ctx.client.PutBlock(ctx.container, ctx.name, block.id, buffer)
			if err == nil {
				break
			}
		}
		if err != nil {
			ctx.errors <- fmt.Errorf("Error uploading block %q for source file %q: %s", block.id, ctx.source, err)
			ctx.wg.Done()
			continue
		}

		ctx.wg.Done()
	}
}
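
// resourceArmStorageBlobRead refreshes the blob's state, removing it from
// state when the storage account or the blob itself no longer exists, and
// computes the "url" attribute.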
func resourceArmStorageBlobRead(d *schema.ResourceData, meta interface{}) error {
	armClient := meta.(*ArmClient)

	resourceGroupName := d.Get("resource_group_name").(string)
	storageAccountName := d.Get("storage_account_name").(string)

	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
	if err != nil {
		return err
	}
	if !accountExists {
		log.Printf("[DEBUG] Storage account %q not found, removing blob %q from state", storageAccountName, d.Id())
		d.SetId("")
		return nil
	}

	exists, err := resourceArmStorageBlobExists(d, meta)
	if err != nil {
		return err
	}

	if !exists {
		// Exists already removed this from state
		return nil
	}

	name := d.Get("name").(string)
	storageContainerName := d.Get("storage_container_name").(string)

	url := blobClient.GetBlobURL(storageContainerName, name)
	if url == "" {
		log.Printf("[INFO] URL for %q is empty", name)
	}
	d.Set("url", url)

	return nil
}
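
// resourceArmStorageBlobExists checks whether the blob still exists in its
// container, clearing the resource ID from state when it does not.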
func resourceArmStorageBlobExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	armClient := meta.(*ArmClient)

	resourceGroupName := d.Get("resource_group_name").(string)
	storageAccountName := d.Get("storage_account_name").(string)

	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
	if err != nil {
		return false, err
	}
	if !accountExists {
		log.Printf("[DEBUG] Storage account %q not found, removing blob %q from state", storageAccountName, d.Id())
		d.SetId("")
		return false, nil
	}

	name := d.Get("name").(string)
	storageContainerName := d.Get("storage_container_name").(string)

	log.Printf("[INFO] Checking for existence of storage blob %q.", name)
	exists, err := blobClient.BlobExists(storageContainerName, name)
	if err != nil {
		return false, fmt.Errorf("Error testing existence of storage blob %q: %s", name, err)
	}

	if !exists {
		log.Printf("[INFO] Storage blob %q no longer exists, removing from state...", name)
		d.SetId("")
	}

	return exists, nil
}
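
// resourceArmStorageBlobDelete deletes the blob if it (and its storage
// account) still exists, then clears the resource ID from state.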
func resourceArmStorageBlobDelete(d *schema.ResourceData, meta interface{}) error {
	armClient := meta.(*ArmClient)

	resourceGroupName := d.Get("resource_group_name").(string)
	storageAccountName := d.Get("storage_account_name").(string)

	blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroupName, storageAccountName)
	if err != nil {
		return err
	}
	if !accountExists {
		log.Printf("[INFO] Storage Account %q doesn't exist so the blob won't exist", storageAccountName)
		return nil
	}

	name := d.Get("name").(string)
	storageContainerName := d.Get("storage_container_name").(string)

	log.Printf("[INFO] Deleting storage blob %q", name)
	if _, err = blobClient.DeleteBlobIfExists(storageContainerName, name, map[string]string{}); err != nil {
		return fmt.Errorf("Error deleting storage blob %q: %s", name, err)
	}

	d.SetId("")
	return nil
}