// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
// Package firehose provides a client for Amazon Kinesis Firehose.
package firehose
import (
"time"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
)
const opCreateDeliveryStream = "CreateDeliveryStream"

// CreateDeliveryStreamRequest generates a request for the CreateDeliveryStream operation.
func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) (req *request.Request, output *CreateDeliveryStreamOutput) {
	op := &request.Operation{
		Name:       opCreateDeliveryStream,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateDeliveryStreamInput{}
	}

	req = c.newRequest(op, input, output)
	output = &CreateDeliveryStreamOutput{}
	req.Data = output
	return
}
// CreateDeliveryStream creates a delivery stream.
//
// CreateDeliveryStream is an asynchronous operation that immediately returns.
// The initial status of the delivery stream is CREATING. After the delivery
// stream is created, its status is ACTIVE and it now accepts data. Attempts
// to send data to a delivery stream that is not in the ACTIVE state cause an
// exception. To check the state of a delivery stream, use DescribeDeliveryStream.
//
// The name of a delivery stream identifies it. You can't have two delivery
// streams with the same name in the same region. Two delivery streams in different
// AWS accounts or different regions in the same AWS account can have the same
// name.
//
// By default, you can create up to 5 delivery streams per region.
//
// A delivery stream can only be configured with a single destination, Amazon
// S3 or Amazon Redshift. For correct CreateDeliveryStream request syntax, specify
// only one destination configuration parameter: either RedshiftDestinationConfiguration
// or S3DestinationConfiguration.
//
// As part of S3DestinationConfiguration, optional values BufferingHints, EncryptionConfiguration,
// and CompressionFormat can be provided. By default, if no BufferingHints value
// is provided, Amazon Kinesis Firehose buffers data up to 5 MB or for 5 minutes,
// whichever condition is satisfied first. Note that BufferingHints is a hint,
// so there are some cases where the service cannot adhere to these conditions
// strictly; for example, record boundaries are such that the size is a little
// over or under the configured buffering size. By default, no encryption is
// performed. We strongly recommend that you enable encryption to ensure secure
// data storage in Amazon S3.
//
// A few notes about RedshiftDestinationConfiguration:
//
// An Amazon Redshift destination requires an S3 bucket as intermediate location,
// as Amazon Kinesis Firehose first delivers data to S3 and then uses COPY syntax
// to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration
// parameter element. The compression formats SNAPPY or ZIP cannot be specified
// in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift
// COPY operation that reads from the S3 bucket doesn't support these compression
// formats. We strongly recommend that the username and password provided is
// used exclusively for Amazon Kinesis Firehose purposes, and that the permissions
// for the account are restricted for Amazon Redshift INSERT permissions. Amazon
// Kinesis Firehose assumes the IAM role that is configured as part of destinations.
// The IAM role should allow the Amazon Kinesis Firehose principal to assume
// the role, and the role should have permissions that allows the service to
// deliver the data. For more information, see Amazon S3 Bucket Access (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3)
// in the Amazon Kinesis Firehose Developer Guide.
func (c *Firehose) CreateDeliveryStream(input *CreateDeliveryStreamInput) (*CreateDeliveryStreamOutput, error) {
	req, out := c.CreateDeliveryStreamRequest(input)
	err := req.Send()
	return out, err
}
const opDeleteDeliveryStream = "DeleteDeliveryStream"

// DeleteDeliveryStreamRequest generates a request for the DeleteDeliveryStream operation.
func (c *Firehose) DeleteDeliveryStreamRequest(input *DeleteDeliveryStreamInput) (req *request.Request, output *DeleteDeliveryStreamOutput) {
	op := &request.Operation{
		Name:       opDeleteDeliveryStream,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteDeliveryStreamInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DeleteDeliveryStreamOutput{}
	req.Data = output
	return
}
// DeleteDeliveryStream deletes a delivery stream and its data.
//
// You can delete a delivery stream only if it is in ACTIVE or DELETING state,
// and not in the CREATING state. While the deletion request is in process,
// the delivery stream is in the DELETING state.
//
// To check the state of a delivery stream, use DescribeDeliveryStream.
//
// While the delivery stream is DELETING state, the service may continue to
// accept the records, but the service doesn't make any guarantees with respect
// to delivering the data. Therefore, as a best practice, you should first stop
// any applications that are sending records before deleting a delivery stream.
func (c *Firehose) DeleteDeliveryStream(input *DeleteDeliveryStreamInput) (*DeleteDeliveryStreamOutput, error) {
	req, out := c.DeleteDeliveryStreamRequest(input)
	err := req.Send()
	return out, err
}
const opDescribeDeliveryStream = "DescribeDeliveryStream"

// DescribeDeliveryStreamRequest generates a request for the DescribeDeliveryStream operation.
func (c *Firehose) DescribeDeliveryStreamRequest(input *DescribeDeliveryStreamInput) (req *request.Request, output *DescribeDeliveryStreamOutput) {
	op := &request.Operation{
		Name:       opDescribeDeliveryStream,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeDeliveryStreamInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeDeliveryStreamOutput{}
	req.Data = output
	return
}
// DescribeDeliveryStream describes the specified delivery stream and gets the
// status. For example, after your delivery stream is created, call
// DescribeDeliveryStream to see if the delivery stream is ACTIVE and therefore
// ready for data to be sent to it.
func (c *Firehose) DescribeDeliveryStream(input *DescribeDeliveryStreamInput) (*DescribeDeliveryStreamOutput, error) {
	req, out := c.DescribeDeliveryStreamRequest(input)
	err := req.Send()
	return out, err
}
const opListDeliveryStreams = "ListDeliveryStreams"

// ListDeliveryStreamsRequest generates a request for the ListDeliveryStreams operation.
func (c *Firehose) ListDeliveryStreamsRequest(input *ListDeliveryStreamsInput) (req *request.Request, output *ListDeliveryStreamsOutput) {
	op := &request.Operation{
		Name:       opListDeliveryStreams,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &ListDeliveryStreamsInput{}
	}

	req = c.newRequest(op, input, output)
	output = &ListDeliveryStreamsOutput{}
	req.Data = output
	return
}
// ListDeliveryStreams lists your delivery streams.
//
// The number of delivery streams might be too large to return using a single
// call to ListDeliveryStreams. You can limit the number of delivery streams
// returned, using the Limit parameter. To determine whether there are more
// delivery streams to list, check the value of HasMoreDeliveryStreams in the
// output. If there are more delivery streams to list, you can request them
// by specifying the name of the last delivery stream returned in the call in
// the ExclusiveStartDeliveryStreamName parameter of a subsequent call.
func (c *Firehose) ListDeliveryStreams(input *ListDeliveryStreamsInput) (*ListDeliveryStreamsOutput, error) {
	req, out := c.ListDeliveryStreamsRequest(input)
	err := req.Send()
	return out, err
}
const opPutRecord = "PutRecord"

// PutRecordRequest generates a request for the PutRecord operation.
func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request, output *PutRecordOutput) {
	op := &request.Operation{
		Name:       opPutRecord,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &PutRecordInput{}
	}

	req = c.newRequest(op, input, output)
	output = &PutRecordOutput{}
	req.Data = output
	return
}
// PutRecord writes a single data record into an Amazon Kinesis Firehose delivery
// stream. To write multiple data records into a delivery stream, use PutRecordBatch.
// Applications using these operations are referred to as producers.
//
// By default, each delivery stream can take in up to 2,000 transactions per
// second, 5,000 records per second, or 5 MB per second. Note that if you use
// PutRecord and PutRecordBatch, the limits are an aggregate across these two
// operations for each delivery stream. For more information about limits and
// how to request an increase, see Amazon Kinesis Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html).
//
// You must specify the name of the delivery stream and the data record when
// using PutRecord. The data record consists of a data blob that can be up to
// 1,000 KB in size, and any kind of data, for example, a segment from a log
// file, geographic location data, web site clickstream data, etc.
//
// Amazon Kinesis Firehose buffers records before delivering them to the destination.
// To disambiguate the data blobs at the destination, a common solution is to
// use delimiters in the data, such as a newline (\n) or some other character
// unique within the data. This allows the consumer application(s) to parse
// individual data items when reading the data from the destination.
//
// Amazon Kinesis Firehose does not maintain data record ordering. If the destination
// data needs to be re-ordered by the consumer application, the producer should
// include some form of sequence number in each data record.
//
// The PutRecord operation returns a RecordId, which is a unique string assigned
// to each record. Producer applications can use this ID for purposes such as
// auditability and investigation.
//
// If the PutRecord operation throws a ServiceUnavailableException, back off
// and retry. If the exception persists, it is possible that the throughput
// limits have been exceeded for the delivery stream.
//
// Data records sent to Amazon Kinesis Firehose are stored for 24 hours from
// the time they are added to a delivery stream as it attempts to send the records
// to the destination. If the destination is unreachable for more than 24 hours,
// the data is no longer available.
func (c *Firehose) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) {
	req, out := c.PutRecordRequest(input)
	err := req.Send()
	return out, err
}
const opPutRecordBatch = "PutRecordBatch"

// PutRecordBatchRequest generates a request for the PutRecordBatch operation.
func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *request.Request, output *PutRecordBatchOutput) {
	op := &request.Operation{
		Name:       opPutRecordBatch,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &PutRecordBatchInput{}
	}

	req = c.newRequest(op, input, output)
	output = &PutRecordBatchOutput{}
	req.Data = output
	return
}
// PutRecordBatch writes multiple data records into a delivery stream in a single
// call, which can achieve higher throughput per producer than when writing single
// records. To write single data records into a delivery stream, use PutRecord.
// Applications using these operations are referred to as producers.
//
// Each PutRecordBatch request supports up to 500 records. Each record in the
// request can be as large as 1,000 KB (before base64 encoding), up to a limit
// of 4 MB for the entire request. By default, each delivery stream can take
// in up to 2,000 transactions per second, 5,000 records per second, or 5 MB
// per second. Note that if you use PutRecord and PutRecordBatch, the limits
// are an aggregate across these two operations for each delivery stream. For
// more information about limits and how to request an increase, see Amazon
// Kinesis Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html).
//
// You must specify the name of the delivery stream and the data record when
// using PutRecordBatch. Each data record consists of a data blob that can be
// up to 1,000 KB in size, and any kind of data, for example, a segment from
// a log file, geographic location data, web site clickstream data, and so on.
//
// Amazon Kinesis Firehose buffers records before delivering them to the destination.
// To disambiguate the data blobs at the destination, a common solution is to
// use delimiters in the data, such as a newline (\n) or some other character
// unique within the data. This allows the consumer application(s) to parse
// individual data items when reading the data from the destination.
//
// The PutRecordBatch response includes a count of any failed records, FailedPutCount,
// and an array of responses, RequestResponses. The FailedPutCount value is
// a count of records that failed. Each entry in the RequestResponses array
// gives additional information of the processed record. Each entry in RequestResponses
// directly correlates with a record in the request array using the same ordering,
// from the top to the bottom of the request and response. RequestResponses
// always includes the same number of records as the request array. RequestResponses
// includes both successfully and unsuccessfully processed records. Amazon Kinesis
// Firehose attempts to process all records in each PutRecordBatch request. A
// single record failure does not stop the processing of subsequent records.
//
// A successfully processed record includes a RecordId value, which is a unique
// value identified for the record. An unsuccessfully processed record includes
// ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and
// is one of the following values: ServiceUnavailable or InternalFailure. ErrorMessage
// provides more detailed information about the error.
//
// If FailedPutCount is greater than 0 (zero), retry the request. A retry of
// the entire batch of records is possible; however, we strongly recommend that
// you inspect the entire response and resend only those records that failed
// processing. This minimizes duplicate records and also reduces the total bytes
// sent (and corresponding charges).
//
// If the PutRecordBatch operation throws a ServiceUnavailableException, back
// off and retry. If the exception persists, it is possible that the throughput
// limits have been exceeded for the delivery stream.
//
// Data records sent to Amazon Kinesis Firehose are stored for 24 hours from
// the time they are added to a delivery stream as it attempts to send the records
// to the destination. If the destination is unreachable for more than 24 hours,
// the data is no longer available.
func (c *Firehose) PutRecordBatch(input *PutRecordBatchInput) (*PutRecordBatchOutput, error) {
	req, out := c.PutRecordBatchRequest(input)
	err := req.Send()
	return out, err
}
const opUpdateDestination = "UpdateDestination"

// UpdateDestinationRequest generates a request for the UpdateDestination operation.
func (c *Firehose) UpdateDestinationRequest(input *UpdateDestinationInput) (req *request.Request, output *UpdateDestinationOutput) {
	op := &request.Operation{
		Name:       opUpdateDestination,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateDestinationInput{}
	}

	req = c.newRequest(op, input, output)
	output = &UpdateDestinationOutput{}
	req.Data = output
	return
}
// UpdateDestination updates the specified destination of the specified delivery
// stream.
//
// This operation can be used to change the destination type (for example,
// to replace the Amazon S3 destination with Amazon Redshift) or change the
// parameters associated with a given destination (for example, to change the
// bucket name of the Amazon S3 destination). The update may not occur immediately.
// The target delivery stream remains active while the configurations are updated,
// so data writes to the delivery stream can continue during this process. The
// updated configurations are normally effective within a few minutes.
//
// If the destination type is the same, Amazon Kinesis Firehose merges the
// configuration parameters specified in the UpdateDestination request with
// the destination configuration that already exists on the delivery stream.
// If any of the parameters are not specified in the update request, then the
// existing configuration parameters are retained. For example, in the Amazon
// S3 destination, if EncryptionConfiguration is not specified then the existing
// EncryptionConfiguration is maintained on the destination.
//
// If the destination type is not the same, for example, changing the destination
// from Amazon S3 to Amazon Redshift, Amazon Kinesis Firehose does not merge
// any parameters. In this case, all parameters must be specified.
//
// Amazon Kinesis Firehose uses the CurrentDeliveryStreamVersionId to avoid
// race conditions and conflicting merges. This is a required field in every
// request and the service only updates the configuration if the existing configuration
// matches the VersionId. After the update is applied successfully, the VersionId
// is updated, which can be retrieved with the DescribeDeliveryStream operation.
// The new VersionId should be used to set CurrentDeliveryStreamVersionId in
// the next UpdateDestination operation.
func (c *Firehose) UpdateDestination(input *UpdateDestinationInput) (*UpdateDestinationOutput, error) {
	req, out := c.UpdateDestinationRequest(input)
	err := req.Send()
	return out, err
}
// BufferingHints describes the buffering to perform before delivering data
// to the destination.
type BufferingHints struct {
	_ struct{} `type:"structure"`

	// Buffer incoming data for the specified period of time, in seconds, before
	// delivering it to the destination. The default value is 300.
	IntervalInSeconds *int64 `min:"60" type:"integer"`

	// Buffer incoming data to the specified size, in MBs, before delivering it
	// to the destination. The default value is 5.
	//
	// We recommend setting SizeInMBs to a value greater than the amount of data
	// you typically ingest into the delivery stream in 10 seconds. For example,
	// if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
	SizeInMBs *int64 `min:"1" type:"integer"`
}

// String returns the string representation
func (s BufferingHints) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s BufferingHints) GoString() string {
	return s.String()
}
// CopyCommand describes a COPY command for Amazon Redshift.
type CopyCommand struct {
	_ struct{} `type:"structure"`

	// Optional parameters to use with the Amazon Redshift COPY command. For more
	// information, see the "Optional Parameters" section of Amazon Redshift COPY
	// command (http://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html). Some
	// possible examples that would apply to Amazon Kinesis Firehose are as follows.
	//
	// delimiter '\t' lzop; - fields are delimited with "\t" (TAB character) and
	// compressed using lzop.
	//
	// delimiter '| - fields are delimited with "|" (this is the default delimiter).
	//
	// delimiter '|' escape - the delimiter should be escaped.
	//
	// fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6'
	// - fields are fixed width in the source, with each width specified after every
	// column in the table.
	//
	// JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path
	// specified is the format of the data.
	//
	// For more examples, see Amazon Redshift COPY command examples (http://docs.aws.amazon.com/redshift/latest/dg/r_COPY_command_examples.html).
	CopyOptions *string `type:"string"`

	// A comma-separated list of column names.
	DataTableColumns *string `type:"string"`

	// The name of the target table. The table must already exist in the database.
	DataTableName *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s CopyCommand) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CopyCommand) GoString() string {
	return s.String()
}
// CreateDeliveryStreamInput contains the parameters for CreateDeliveryStream.
type CreateDeliveryStreamInput struct {
	_ struct{} `type:"structure"`

	// The name of the delivery stream.
	DeliveryStreamName *string `min:"1" type:"string" required:"true"`

	// The destination in Amazon Redshift. This value cannot be specified if Amazon
	// S3 is the desired destination (see restrictions listed above).
	RedshiftDestinationConfiguration *RedshiftDestinationConfiguration `type:"structure"`

	// The destination in Amazon S3. This value must be specified if RedshiftDestinationConfiguration
	// is specified (see restrictions listed above).
	S3DestinationConfiguration *S3DestinationConfiguration `type:"structure"`
}

// String returns the string representation
func (s CreateDeliveryStreamInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateDeliveryStreamInput) GoString() string {
	return s.String()
}
// CreateDeliveryStreamOutput contains the output of CreateDeliveryStream.
type CreateDeliveryStreamOutput struct {
	_ struct{} `type:"structure"`

	// The ARN of the delivery stream.
	DeliveryStreamARN *string `type:"string"`
}

// String returns the string representation
func (s CreateDeliveryStreamOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateDeliveryStreamOutput) GoString() string {
	return s.String()
}
// DeleteDeliveryStreamInput contains the parameters for DeleteDeliveryStream.
type DeleteDeliveryStreamInput struct {
	_ struct{} `type:"structure"`

	// The name of the delivery stream.
	DeliveryStreamName *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s DeleteDeliveryStreamInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteDeliveryStreamInput) GoString() string {
	return s.String()
}
// DeleteDeliveryStreamOutput contains the output of DeleteDeliveryStream.
type DeleteDeliveryStreamOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s DeleteDeliveryStreamOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteDeliveryStreamOutput) GoString() string {
	return s.String()
}
// DeliveryStreamDescription contains information about a delivery stream.
type DeliveryStreamDescription struct {
	_ struct{} `type:"structure"`

	// The date and time that the delivery stream was created.
	CreateTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"`

	// The Amazon Resource Name (ARN) of the delivery stream.
	DeliveryStreamARN *string `type:"string" required:"true"`

	// The name of the delivery stream.
	DeliveryStreamName *string `min:"1" type:"string" required:"true"`

	// The status of the delivery stream.
	DeliveryStreamStatus *string `type:"string" required:"true" enum:"DeliveryStreamStatus"`

	// The destinations.
	Destinations []*DestinationDescription `type:"list" required:"true"`

	// Indicates whether there are more destinations available to list.
	HasMoreDestinations *bool `type:"boolean" required:"true"`

	// The date and time that the delivery stream was last updated.
	LastUpdateTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"`

	// Used when calling the UpdateDestination operation. Each time the destination
	// is updated for the delivery stream, the VersionId is changed, and the current
	// VersionId is required when updating the destination. This is so that the
	// service knows it is applying the changes to the correct version of the delivery
	// stream.
	VersionId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s DeliveryStreamDescription) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeliveryStreamDescription) GoString() string {
	return s.String()
}
// DescribeDeliveryStreamInput contains the parameters for DescribeDeliveryStream.
type DescribeDeliveryStreamInput struct {
	_ struct{} `type:"structure"`

	// The name of the delivery stream.
	DeliveryStreamName *string `min:"1" type:"string" required:"true"`

	// Specifies the destination ID to start returning the destination information.
	// Currently Amazon Kinesis Firehose supports one destination per delivery stream.
	ExclusiveStartDestinationId *string `min:"1" type:"string"`

	// The limit on the number of destinations to return. Currently, you can have
	// one destination per delivery stream.
	Limit *int64 `min:"1" type:"integer"`
}

// String returns the string representation
func (s DescribeDeliveryStreamInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeDeliveryStreamInput) GoString() string {
	return s.String()
}
// DescribeDeliveryStreamOutput contains the output of DescribeDeliveryStream.
type DescribeDeliveryStreamOutput struct {
	_ struct{} `type:"structure"`

	// Information about the delivery stream.
	DeliveryStreamDescription *DeliveryStreamDescription `type:"structure" required:"true"`
}

// String returns the string representation
func (s DescribeDeliveryStreamOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeDeliveryStreamOutput) GoString() string {
	return s.String()
}
// DestinationDescription describes the destination for a delivery stream.
type DestinationDescription struct {
	_ struct{} `type:"structure"`

	// The ID of the destination.
	DestinationId *string `min:"1" type:"string" required:"true"`

	// The destination in Amazon Redshift.
	RedshiftDestinationDescription *RedshiftDestinationDescription `type:"structure"`

	// The Amazon S3 destination.
	S3DestinationDescription *S3DestinationDescription `type:"structure"`
}

// String returns the string representation
func (s DestinationDescription) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DestinationDescription) GoString() string {
	return s.String()
}
// EncryptionConfiguration describes the encryption for a destination in Amazon S3.
type EncryptionConfiguration struct {
	_ struct{} `type:"structure"`

	// The encryption key.
	KMSEncryptionConfig *KMSEncryptionConfig `type:"structure"`

	// Specifically override existing encryption information to ensure no encryption
	// is used.
	NoEncryptionConfig *string `type:"string" enum:"NoEncryptionConfig"`
}

// String returns the string representation
func (s EncryptionConfiguration) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s EncryptionConfiguration) GoString() string {
	return s.String()
}
// KMSEncryptionConfig describes an encryption key for a destination in Amazon S3.
type KMSEncryptionConfig struct {
	_ struct{} `type:"structure"`

	// The ARN of the encryption key. Must belong to the same region as the destination
	// Amazon S3 bucket.
	AWSKMSKeyARN *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s KMSEncryptionConfig) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s KMSEncryptionConfig) GoString() string {
	return s.String()
}
// ListDeliveryStreamsInput contains the parameters for ListDeliveryStreams.
type ListDeliveryStreamsInput struct {
	_ struct{} `type:"structure"`

	// The name of the delivery stream to start the list with.
	ExclusiveStartDeliveryStreamName *string `min:"1" type:"string"`

	// The maximum number of delivery streams to list.
	Limit *int64 `min:"1" type:"integer"`
}

// String returns the string representation
func (s ListDeliveryStreamsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListDeliveryStreamsInput) GoString() string {
	return s.String()
}
// ListDeliveryStreamsOutput contains the output of ListDeliveryStreams.
type ListDeliveryStreamsOutput struct {
	_ struct{} `type:"structure"`

	// The names of the delivery streams.
	DeliveryStreamNames []*string `type:"list" required:"true"`

	// Indicates whether there are more delivery streams available to list.
	HasMoreDeliveryStreams *bool `type:"boolean" required:"true"`
}

// String returns the string representation
func (s ListDeliveryStreamsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListDeliveryStreamsOutput) GoString() string {
	return s.String()
}
// PutRecordBatchInput contains the parameters for PutRecordBatch.
type PutRecordBatchInput struct {
	_ struct{} `type:"structure"`

	// The name of the delivery stream.
	DeliveryStreamName *string `min:"1" type:"string" required:"true"`

	// One or more records.
	Records []*Record `min:"1" type:"list" required:"true"`
}

// String returns the string representation
func (s PutRecordBatchInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s PutRecordBatchInput) GoString() string {
	return s.String()
}
// PutRecordBatchOutput contains the output of PutRecordBatch.
type PutRecordBatchOutput struct {
	_ struct{} `type:"structure"`

	// The number of unsuccessfully written records.
	FailedPutCount *int64 `type:"integer" required:"true"`

	// The results for the individual records. The index of each element matches
	// the same index in which records were sent.
	RequestResponses []*PutRecordBatchResponseEntry `min:"1" type:"list" required:"true"`
}

// String returns the string representation
func (s PutRecordBatchOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s PutRecordBatchOutput) GoString() string {
	return s.String()
}
// PutRecordBatchResponseEntry contains the result for an individual record from
// a PutRecordBatch request. If the record is successfully added to your delivery
// stream, it receives a record ID. If the record fails to be added to your
// delivery stream, the result includes an error code and an error message.
type PutRecordBatchResponseEntry struct {
	_ struct{} `type:"structure"`

	// The error code for an individual record result.
	ErrorCode *string `type:"string"`

	// The error message for an individual record result.
	ErrorMessage *string `type:"string"`

	// The ID of the record.
	RecordId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s PutRecordBatchResponseEntry) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s PutRecordBatchResponseEntry) GoString() string {
	return s.String()
}
// PutRecordInput contains the parameters for PutRecord.
type PutRecordInput struct {
	_ struct{} `type:"structure"`

	// The name of the delivery stream.
	DeliveryStreamName *string `min:"1" type:"string" required:"true"`

	// The record.
	Record *Record `type:"structure" required:"true"`
}

// String returns the string representation
func (s PutRecordInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s PutRecordInput) GoString() string {
	return s.String()
}
// Contains the output of PutRecord.
type PutRecordOutput struct {
_ struct { } ` type:"structure" `
// The ID of the record.
RecordId * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s PutRecordOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation of the value.
func (s PutRecordOutput) GoString() string {
	return s.String()
}
// Record is the unit of data in a delivery stream.
//
// NOTE: the original block contained two stray VCS timestamp lines
// ("2016-03-18 20:35:09 +01:00" and "2016-01-29 20:53:56 +01:00") that were
// not comments and therefore broke compilation; they have been removed.
type Record struct {
	_ struct{} ` type:"structure" `

	// The data blob, which is base64-encoded when the blob is serialized. The
	// maximum size of the data blob, before base64-encoding, is 1,000 KB.
	//
	// Data is automatically base64 encoded/decoded by the SDK.
	Data []byte ` type:"blob" required:"true" `
}
// String returns the string representation of the value.
func (s Record) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation of the value.
func (s Record) GoString() string {
	return s.String()
}
// RedshiftDestinationConfiguration describes the configuration of a
// destination in Amazon Redshift.
type RedshiftDestinationConfiguration struct {
	_ struct{} ` type:"structure" `

	// The database connection string.
	ClusterJDBCURL *string ` min:"1" type:"string" required:"true" `

	// The COPY command.
	CopyCommand *CopyCommand ` type:"structure" required:"true" `

	// The user password.
	Password *string ` min:"6" type:"string" required:"true" `

	// The ARN of the AWS credentials.
	RoleARN *string ` min:"1" type:"string" required:"true" `

	// The S3 configuration for the intermediate location from which Amazon
	// Redshift obtains data. Restrictions are described in the topic for
	// CreateDeliveryStream.
	//
	// The compression formats SNAPPY or ZIP cannot be specified in
	// RedshiftDestinationConfiguration.S3Configuration because the Amazon
	// Redshift COPY operation that reads from the S3 bucket doesn't support
	// these compression formats.
	S3Configuration *S3DestinationConfiguration ` type:"structure" required:"true" `

	// The name of the user.
	Username *string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation of the value.
func (s RedshiftDestinationConfiguration) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation of the value.
func (s RedshiftDestinationConfiguration) GoString() string {
	return s.String()
}
// RedshiftDestinationDescription describes a destination in Amazon Redshift.
type RedshiftDestinationDescription struct {
	_ struct{} ` type:"structure" `

	// The database connection string.
	ClusterJDBCURL *string ` min:"1" type:"string" required:"true" `

	// The COPY command.
	CopyCommand *CopyCommand ` type:"structure" required:"true" `

	// The ARN of the AWS credentials.
	RoleARN *string ` min:"1" type:"string" required:"true" `

	// The Amazon S3 destination.
	S3DestinationDescription *S3DestinationDescription ` type:"structure" required:"true" `

	// The name of the user.
	Username *string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation of the value.
func (s RedshiftDestinationDescription) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation of the value.
func (s RedshiftDestinationDescription) GoString() string {
	return s.String()
}
// RedshiftDestinationUpdate describes an update for a destination in Amazon
// Redshift. All fields are optional; only the fields that are set are applied.
type RedshiftDestinationUpdate struct {
	_ struct{} ` type:"structure" `

	// The database connection string.
	ClusterJDBCURL *string ` min:"1" type:"string" `

	// The COPY command.
	CopyCommand *CopyCommand ` type:"structure" `

	// The user password.
	Password *string ` min:"6" type:"string" `

	// The ARN of the AWS credentials.
	RoleARN *string ` min:"1" type:"string" `

	// The Amazon S3 destination.
	//
	// The compression formats SNAPPY or ZIP cannot be specified in
	// RedshiftDestinationUpdate.S3Update because the Amazon Redshift COPY
	// operation that reads from the S3 bucket doesn't support these
	// compression formats.
	S3Update *S3DestinationUpdate ` type:"structure" `

	// The name of the user.
	Username *string ` min:"1" type:"string" `
}
// String returns the string representation of the value.
func (s RedshiftDestinationUpdate) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation of the value.
func (s RedshiftDestinationUpdate) GoString() string {
	return s.String()
}
// S3DestinationConfiguration describes the configuration of a destination in
// Amazon S3.
type S3DestinationConfiguration struct {
	_ struct{} ` type:"structure" `

	// The ARN of the S3 bucket.
	BucketARN *string ` min:"1" type:"string" required:"true" `

	// The buffering option. If no value is specified, BufferingHints object
	// default values are used.
	BufferingHints *BufferingHints ` type:"structure" `

	// The compression format. If no value is specified, the default is
	// UNCOMPRESSED.
	//
	// The compression formats SNAPPY or ZIP cannot be specified for Amazon
	// Redshift destinations because they are not supported by the Amazon
	// Redshift COPY operation that reads from the S3 bucket.
	CompressionFormat *string ` type:"string" enum:"CompressionFormat" `

	// The encryption configuration. If no value is specified, the default is
	// no encryption.
	EncryptionConfiguration *EncryptionConfiguration ` type:"structure" `

	// The "YYYY/MM/DD/HH" time format prefix is automatically used for
	// delivered S3 files. You can specify an extra prefix to be added in front
	// of the time format prefix. Note that if the prefix ends with a slash, it
	// appears as a folder in the S3 bucket. For more information, see Amazon
	// S3 Object Name Format
	// (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html) in
	// the guide-fh-dev (http://docs.aws.amazon.com/firehose/latest/dev/).
	Prefix *string ` type:"string" `

	// The ARN of the AWS credentials.
	RoleARN *string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation of the value.
func (s S3DestinationConfiguration) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation of the value.
func (s S3DestinationConfiguration) GoString() string {
	return s.String()
}
// S3DestinationDescription describes a destination in Amazon S3.
type S3DestinationDescription struct {
	_ struct{} ` type:"structure" `

	// The ARN of the S3 bucket.
	BucketARN *string ` min:"1" type:"string" required:"true" `

	// The buffering option. If no value is specified, BufferingHints object
	// default values are used.
	BufferingHints *BufferingHints ` type:"structure" required:"true" `

	// The compression format. If no value is specified, the default is
	// UNCOMPRESSED. (The previous comment said "NOCOMPRESSION", which is not a
	// CompressionFormat enum value.)
	CompressionFormat *string ` type:"string" required:"true" enum:"CompressionFormat" `

	// The encryption configuration. If no value is specified, the default is
	// no encryption.
	EncryptionConfiguration *EncryptionConfiguration ` type:"structure" required:"true" `

	// The "YYYY/MM/DD/HH" time format prefix is automatically used for
	// delivered S3 files. You can specify an extra prefix to be added in front
	// of the time format prefix. Note that if the prefix ends with a slash, it
	// appears as a folder in the S3 bucket. For more information, see Amazon
	// S3 Object Name Format
	// (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html) in
	// the guide-fh-dev (http://docs.aws.amazon.com/firehose/latest/dev/).
	Prefix *string ` type:"string" `

	// The ARN of the AWS credentials.
	RoleARN *string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation of the value.
func (s S3DestinationDescription) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation of the value.
func (s S3DestinationDescription) GoString() string {
	return s.String()
}
// S3DestinationUpdate describes an update for a destination in Amazon S3.
// All fields are optional; only the fields that are set are applied.
type S3DestinationUpdate struct {
	_ struct{} ` type:"structure" `

	// The ARN of the S3 bucket.
	BucketARN *string ` min:"1" type:"string" `

	// The buffering option. If no value is specified, BufferingHints object
	// default values are used.
	BufferingHints *BufferingHints ` type:"structure" `

	// The compression format. If no value is specified, the default is
	// UNCOMPRESSED. (The previous comment said "NOCOMPRESSION", which is not a
	// CompressionFormat enum value.)
	//
	// The compression formats SNAPPY or ZIP cannot be specified for Amazon
	// Redshift destinations because they are not supported by the Amazon
	// Redshift COPY operation that reads from the S3 bucket.
	CompressionFormat *string ` type:"string" enum:"CompressionFormat" `

	// The encryption configuration. If no value is specified, the default is
	// no encryption.
	EncryptionConfiguration *EncryptionConfiguration ` type:"structure" `

	// The "YYYY/MM/DD/HH" time format prefix is automatically used for
	// delivered S3 files. You can specify an extra prefix to be added in front
	// of the time format prefix. Note that if the prefix ends with a slash, it
	// appears as a folder in the S3 bucket. For more information, see Amazon
	// S3 Object Name Format
	// (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html) in
	// the guide-fh-dev (http://docs.aws.amazon.com/firehose/latest/dev/).
	Prefix *string ` type:"string" `

	// The ARN of the AWS credentials.
	RoleARN *string ` min:"1" type:"string" `
}
// String returns the string representation of the value.
func (s S3DestinationUpdate) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation of the value.
func (s S3DestinationUpdate) GoString() string {
	return s.String()
}
// UpdateDestinationInput contains the parameters for UpdateDestination.
type UpdateDestinationInput struct {
	_ struct{} ` type:"structure" `

	// Obtain this value from the VersionId result of the
	// DeliveryStreamDescription operation. This value is required, and helps
	// the service to perform conditional operations. For example, if there is
	// an interleaving update and this value is null, then the update
	// destination fails. After the update is successful, the VersionId value
	// is updated. The service then performs a merge of the old configuration
	// with the new configuration.
	CurrentDeliveryStreamVersionId *string ` min:"1" type:"string" required:"true" `

	// The name of the delivery stream.
	DeliveryStreamName *string ` min:"1" type:"string" required:"true" `

	// The ID of the destination.
	DestinationId *string ` min:"1" type:"string" required:"true" `

	// Describes an update for a destination in Amazon Redshift.
	RedshiftDestinationUpdate *RedshiftDestinationUpdate ` type:"structure" `

	// Describes an update for a destination in Amazon S3.
	S3DestinationUpdate *S3DestinationUpdate ` type:"structure" `
}
// String returns the string representation of the value.
func (s UpdateDestinationInput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation of the value.
func (s UpdateDestinationInput) GoString() string {
	return s.String()
}
// UpdateDestinationOutput contains the output of UpdateDestination. It
// carries no fields.
type UpdateDestinationOutput struct {
	_ struct{} ` type:"structure" `
}
// String returns the string representation of the value.
func (s UpdateDestinationOutput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation of the value.
func (s UpdateDestinationOutput) GoString() string {
	return s.String()
}
// Valid values for the CompressionFormat enum. Note that the service value
// for Snappy is mixed-case ("Snappy"), unlike the other all-caps values.
const (
	// @enum CompressionFormat
	CompressionFormatUncompressed = "UNCOMPRESSED"
	// @enum CompressionFormat
	CompressionFormatGzip = "GZIP"
	// @enum CompressionFormat
	CompressionFormatZip = "ZIP"
	// @enum CompressionFormat
	CompressionFormatSnappy = "Snappy"
)
// Valid values for the DeliveryStreamStatus enum.
const (
	// @enum DeliveryStreamStatus
	DeliveryStreamStatusCreating = "CREATING"
	// @enum DeliveryStreamStatus
	DeliveryStreamStatusDeleting = "DELETING"
	// @enum DeliveryStreamStatus
	DeliveryStreamStatusActive = "ACTIVE"
)
// Valid values for the NoEncryptionConfig enum.
const (
	// @enum NoEncryptionConfig
	NoEncryptionConfigNoEncryption = "NoEncryption"
)