// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

// Package kinesis provides a client for Amazon Kinesis.
package kinesis

import (
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws/awsutil"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/private/protocol"
    "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
)
const opAddTagsToStream = "AddTagsToStream"
// AddTagsToStreamRequest generates a "aws/request.Request" representing the
// client's request for the AddTagsToStream operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the AddTagsToStream method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the AddTagsToStreamRequest method.
//    req, resp := client.AddTagsToStreamRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) AddTagsToStreamRequest(input *AddTagsToStreamInput) (req *request.Request, output *AddTagsToStreamOutput) {
    op := &request.Operation{
        Name:       opAddTagsToStream,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &AddTagsToStreamInput{}
    }

    req = c.newRequest(op, input, output)
    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    output = &AddTagsToStreamOutput{}
    req.Data = output
    return
}
// Adds or updates tags for the specified Amazon Kinesis stream. Each stream
// can have up to 10 tags.
//
// If tags have already been assigned to the stream, AddTagsToStream overwrites
// any existing tags that correspond to the specified tag keys.
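//
// A minimal usage sketch, assuming an already-constructed *Kinesis client named
// client and a string variable name holding the stream name, might look like:
//
//    env := "production"
//    _, err := client.AddTagsToStream(&AddTagsToStreamInput{
//        StreamName: &name,
//        Tags:       map[string]*string{"Environment": &env},
//    })
//    if err != nil {
//        fmt.Println(err)
//    }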
func (c *Kinesis) AddTagsToStream(input *AddTagsToStreamInput) (*AddTagsToStreamOutput, error) {
    req, out := c.AddTagsToStreamRequest(input)
    err := req.Send()
    return out, err
}
const opCreateStream = "CreateStream"
// CreateStreamRequest generates a "aws/request.Request" representing the
// client's request for the CreateStream operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the CreateStream method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the CreateStreamRequest method.
//    req, resp := client.CreateStreamRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) CreateStreamRequest(input *CreateStreamInput) (req *request.Request, output *CreateStreamOutput) {
    op := &request.Operation{
        Name:       opCreateStream,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &CreateStreamInput{}
    }

    req = c.newRequest(op, input, output)
    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    output = &CreateStreamOutput{}
    req.Data = output
    return
}
// Creates an Amazon Kinesis stream. A stream captures and transports data records
// that are continuously emitted from different data sources or producers. Scale-out
// within a stream is explicitly supported by means of shards, which are uniquely
// identified groups of data records in a stream.
//
// You specify and control the number of shards that a stream is composed of.
// Each shard can support reads up to 5 transactions per second, up to a maximum
// data read total of 2 MB per second. Each shard can support writes up to 1,000
// records per second, up to a maximum data write total of 1 MB per second.
// You can add shards to a stream if the amount of data input increases and
// you can remove shards if the amount of data input decreases.
//
// The stream name identifies the stream. The name is scoped to the AWS account
// used by the application. It is also scoped by region. That is, two streams
// in two different accounts can have the same name, and two streams in the
// same account, but in two different regions, can have the same name.
//
// CreateStream is an asynchronous operation. Upon receiving a CreateStream
// request, Amazon Kinesis immediately returns and sets the stream status to
// CREATING. After the stream is created, Amazon Kinesis sets the stream status
// to ACTIVE. You should perform read and write operations only on an ACTIVE
// stream.
//
// You receive a LimitExceededException when making a CreateStream request
// if you try to do one of the following:
//
//    * Have more than five streams in the CREATING state at any point in time.
//
//    * Create more shards than are authorized for your account.
//
// For the default shard limit for an AWS account, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
// in the Amazon Kinesis Streams Developer Guide. If you need to increase this
// limit, contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
//
// You can use DescribeStream to check the stream status, which is returned
// in StreamStatus.
//
// CreateStream has a limit of 5 transactions per second per account.
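//
// A minimal usage sketch, assuming an already-constructed *Kinesis client named
// client and the CreateStreamInput/DescribeStreamInput field names defined
// elsewhere in this package, might create a two-shard stream and poll
// DescribeStream until the status becomes ACTIVE:
//
//    name, shards := "my-stream", int64(2)
//    if _, err := client.CreateStream(&CreateStreamInput{StreamName: &name, ShardCount: &shards}); err != nil {
//        fmt.Println(err)
//        return
//    }
//    for {
//        out, err := client.DescribeStream(&DescribeStreamInput{StreamName: &name})
//        if err != nil || *out.StreamDescription.StreamStatus == "ACTIVE" {
//            break
//        }
//        time.Sleep(10 * time.Second)
//    }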
func (c *Kinesis) CreateStream(input *CreateStreamInput) (*CreateStreamOutput, error) {
    req, out := c.CreateStreamRequest(input)
    err := req.Send()
    return out, err
}
const opDecreaseStreamRetentionPeriod = "DecreaseStreamRetentionPeriod"
// DecreaseStreamRetentionPeriodRequest generates a "aws/request.Request" representing the
// client's request for the DecreaseStreamRetentionPeriod operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the DecreaseStreamRetentionPeriod method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DecreaseStreamRetentionPeriodRequest method.
//    req, resp := client.DecreaseStreamRetentionPeriodRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) DecreaseStreamRetentionPeriodRequest(input *DecreaseStreamRetentionPeriodInput) (req *request.Request, output *DecreaseStreamRetentionPeriodOutput) {
    op := &request.Operation{
        Name:       opDecreaseStreamRetentionPeriod,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &DecreaseStreamRetentionPeriodInput{}
    }

    req = c.newRequest(op, input, output)
    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    output = &DecreaseStreamRetentionPeriodOutput{}
    req.Data = output
    return
}
// Decreases the Amazon Kinesis stream's retention period, which is the length
// of time data records are accessible after they are added to the stream. The
// minimum value of a stream's retention period is 24 hours.
//
// This operation may result in lost data. For example, if the stream's retention
// period is 48 hours and is decreased to 24 hours, any data already in the
// stream that is older than 24 hours is inaccessible.
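//
// A minimal usage sketch, assuming an already-constructed *Kinesis client named
// client, a string variable name holding the stream name, and a RetentionPeriodHours
// field on the input shape, might look like:
//
//    hours := int64(24)
//    _, err := client.DecreaseStreamRetentionPeriod(&DecreaseStreamRetentionPeriodInput{
//        StreamName:           &name,
//        RetentionPeriodHours: &hours,
//    })
//    if err != nil {
//        fmt.Println(err)
//    }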
func (c *Kinesis) DecreaseStreamRetentionPeriod(input *DecreaseStreamRetentionPeriodInput) (*DecreaseStreamRetentionPeriodOutput, error) {
    req, out := c.DecreaseStreamRetentionPeriodRequest(input)
    err := req.Send()
    return out, err
}
const opDeleteStream = "DeleteStream"
// DeleteStreamRequest generates a "aws/request.Request" representing the
// client's request for the DeleteStream operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the DeleteStream method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteStreamRequest method.
//    req, resp := client.DeleteStreamRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) DeleteStreamRequest(input *DeleteStreamInput) (req *request.Request, output *DeleteStreamOutput) {
    op := &request.Operation{
        Name:       opDeleteStream,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &DeleteStreamInput{}
    }

    req = c.newRequest(op, input, output)
    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    output = &DeleteStreamOutput{}
    req.Data = output
    return
}
// Deletes an Amazon Kinesis stream and all its shards and data. You must shut
// down any applications that are operating on the stream before you delete
// the stream. If an application attempts to operate on a deleted stream, it
// will receive the exception ResourceNotFoundException.
//
// If the stream is in the ACTIVE state, you can delete it. After a DeleteStream
// request, the specified stream is in the DELETING state until Amazon Kinesis
// completes the deletion.
//
// Note: Amazon Kinesis might continue to accept data read and write operations,
// such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING
// state until the stream deletion is complete.
//
// When you delete a stream, any shards in that stream are also deleted, and
// any tags are dissociated from the stream.
//
// You can use the DescribeStream operation to check the state of the stream,
// which is returned in StreamStatus.
//
// DeleteStream has a limit of 5 transactions per second per account.
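//
// A minimal usage sketch, assuming an already-constructed *Kinesis client named
// client and a string variable name holding the stream name, might look like:
//
//    if _, err := client.DeleteStream(&DeleteStreamInput{StreamName: &name}); err != nil {
//        fmt.Println(err)
//        return
//    }
//    // The stream is now DELETING; poll DescribeStream until it returns
//    // ResourceNotFoundException to confirm that deletion has completed.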
func (c *Kinesis) DeleteStream(input *DeleteStreamInput) (*DeleteStreamOutput, error) {
    req, out := c.DeleteStreamRequest(input)
    err := req.Send()
    return out, err
}
const opDescribeStream = "DescribeStream"
// DescribeStreamRequest generates a "aws/request.Request" representing the
// client's request for the DescribeStream operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the DescribeStream method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DescribeStreamRequest method.
//    req, resp := client.DescribeStreamRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) DescribeStreamRequest(input *DescribeStreamInput) (req *request.Request, output *DescribeStreamOutput) {
    op := &request.Operation{
        Name:       opDescribeStream,
        HTTPMethod: "POST",
        HTTPPath:   "/",
        Paginator: &request.Paginator{
            InputTokens:     []string{"ExclusiveStartShardId"},
            OutputTokens:    []string{"StreamDescription.Shards[-1].ShardId"},
            LimitToken:      "Limit",
            TruncationToken: "StreamDescription.HasMoreShards",
        },
    }

    if input == nil {
        input = &DescribeStreamInput{}
    }

    req = c.newRequest(op, input, output)
    output = &DescribeStreamOutput{}
    req.Data = output
    return
}
// Describes the specified Amazon Kinesis stream.
//
// The information about the stream includes its current status, its Amazon
// Resource Name (ARN), and an array of shard objects. For each shard object,
// there is information about the hash key and sequence number ranges that the
// shard spans, and the IDs of any earlier shards that played a role in creating
// the shard. A sequence number is the identifier associated with every record
// ingested in the stream. The sequence number is assigned when a record is
// put into the stream.
//
// You can limit the number of returned shards using the Limit parameter. The
// number of shards in a stream may be too large to return from a single call
// to DescribeStream. You can detect this by using the HasMoreShards flag in
// the returned output. HasMoreShards is set to true when there is more data
// available.
//
// DescribeStream is a paginated operation. If there are more shards available,
// you can request them using the shard ID of the last shard returned. Specify
// this ID in the ExclusiveStartShardId parameter in a subsequent request to
// DescribeStream.
//
// There are no guarantees about the chronological order of shards returned in
// DescribeStream results. If you want to process shards in chronological order,
// use ParentShardId to track lineage to the oldest shard.
//
// DescribeStream has a limit of 10 transactions per second per account.
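//
// A minimal pagination sketch, assuming an already-constructed *Kinesis client
// named client, a string variable name holding the stream name, and the Shard
// and StreamDescription shapes defined elsewhere in this package, might collect
// every shard as follows (DescribeStreamPages below wraps the same loop):
//
//    var shards []*Shard
//    var start *string
//    for {
//        out, err := client.DescribeStream(&DescribeStreamInput{
//            StreamName:            &name,
//            ExclusiveStartShardId: start,
//        })
//        if err != nil {
//            break
//        }
//        shards = append(shards, out.StreamDescription.Shards...)
//        if !*out.StreamDescription.HasMoreShards || len(out.StreamDescription.Shards) == 0 {
//            break
//        }
//        start = out.StreamDescription.Shards[len(out.StreamDescription.Shards)-1].ShardId
//    }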
func (c *Kinesis) DescribeStream(input *DescribeStreamInput) (*DescribeStreamOutput, error) {
    req, out := c.DescribeStreamRequest(input)
    err := req.Send()
    return out, err
}
// DescribeStreamPages iterates over the pages of a DescribeStream operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeStream method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
//    // Example iterating over at most 3 pages of a DescribeStream operation.
//    pageNum := 0
//    err := client.DescribeStreamPages(params,
//        func(page *DescribeStreamOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(page)
//            return pageNum <= 3
//        })
//
func (c *Kinesis) DescribeStreamPages(input *DescribeStreamInput, fn func(p *DescribeStreamOutput, lastPage bool) (shouldContinue bool)) error {
    page, _ := c.DescribeStreamRequest(input)
    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
    return page.EachPage(func(p interface{}, lastPage bool) bool {
        return fn(p.(*DescribeStreamOutput), lastPage)
    })
}
const opDisableEnhancedMonitoring = "DisableEnhancedMonitoring"
// DisableEnhancedMonitoringRequest generates a "aws/request.Request" representing the
// client's request for the DisableEnhancedMonitoring operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the DisableEnhancedMonitoring method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DisableEnhancedMonitoringRequest method.
//    req, resp := client.DisableEnhancedMonitoringRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) DisableEnhancedMonitoringRequest(input *DisableEnhancedMonitoringInput) (req *request.Request, output *EnhancedMonitoringOutput) {
    op := &request.Operation{
        Name:       opDisableEnhancedMonitoring,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &DisableEnhancedMonitoringInput{}
    }

    req = c.newRequest(op, input, output)
    output = &EnhancedMonitoringOutput{}
    req.Data = output
    return
}
// Disables enhanced monitoring.
func (c *Kinesis) DisableEnhancedMonitoring(input *DisableEnhancedMonitoringInput) (*EnhancedMonitoringOutput, error) {
    req, out := c.DisableEnhancedMonitoringRequest(input)
    err := req.Send()
    return out, err
}
const opEnableEnhancedMonitoring = "EnableEnhancedMonitoring"
// EnableEnhancedMonitoringRequest generates a "aws/request.Request" representing the
// client's request for the EnableEnhancedMonitoring operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the EnableEnhancedMonitoring method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the EnableEnhancedMonitoringRequest method.
//    req, resp := client.EnableEnhancedMonitoringRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) EnableEnhancedMonitoringRequest(input *EnableEnhancedMonitoringInput) (req *request.Request, output *EnhancedMonitoringOutput) {
    op := &request.Operation{
        Name:       opEnableEnhancedMonitoring,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &EnableEnhancedMonitoringInput{}
    }

    req = c.newRequest(op, input, output)
    output = &EnhancedMonitoringOutput{}
    req.Data = output
    return
}
// Enables enhanced Amazon Kinesis stream monitoring for shard-level metrics.
func (c *Kinesis) EnableEnhancedMonitoring(input *EnableEnhancedMonitoringInput) (*EnhancedMonitoringOutput, error) {
    req, out := c.EnableEnhancedMonitoringRequest(input)
    err := req.Send()
    return out, err
}
const opGetRecords = "GetRecords"
// GetRecordsRequest generates a "aws/request.Request" representing the
// client's request for the GetRecords operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the GetRecords method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetRecordsRequest method.
//    req, resp := client.GetRecordsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) GetRecordsRequest(input *GetRecordsInput) (req *request.Request, output *GetRecordsOutput) {
    op := &request.Operation{
        Name:       opGetRecords,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &GetRecordsInput{}
    }

    req = c.newRequest(op, input, output)
    output = &GetRecordsOutput{}
    req.Data = output
    return
}
// Gets data records from an Amazon Kinesis stream's shard.
//
// Specify a shard iterator using the ShardIterator parameter. The shard iterator
// specifies the position in the shard from which you want to start reading
// data records sequentially. If there are no records available in the portion
// of the shard that the iterator points to, GetRecords returns an empty list.
// Note that it might take multiple calls to get to a portion of the shard that
// contains records.
//
// You can scale by provisioning multiple shards per stream while considering
// service limits (for more information, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
// in the Amazon Kinesis Streams Developer Guide). Your application should have
// one thread per shard, each reading continuously from its stream. To read
// from a stream continually, call GetRecords in a loop. Use GetShardIterator
// to get the shard iterator to specify in the first GetRecords call. GetRecords
// returns a new shard iterator in NextShardIterator. Specify the shard iterator
// returned in NextShardIterator in subsequent calls to GetRecords. Note that
// if the shard has been closed, the shard iterator can't return more data and
// GetRecords returns null in NextShardIterator. You can terminate the loop
// when the shard is closed, or when the shard iterator reaches the record with
// the sequence number or other attribute that marks it as the last record to
// process.
//
// Each data record can be up to 1 MB in size, and each shard can read up to
// 2 MB per second. You can ensure that your calls don't exceed the maximum
// supported size or throughput by using the Limit parameter to specify the
// maximum number of records that GetRecords can return. Consider your average
// record size when determining this limit.
//
// The size of the data returned by GetRecords varies depending on the utilization
// of the shard. The maximum size of data that GetRecords can return is 10 MB.
// If a call returns this amount of data, subsequent calls made within the next
// 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient
// provisioned throughput on the shard, subsequent calls made within the next
// 1 second throw ProvisionedThroughputExceededException. Note that GetRecords
// won't return any data when it throws an exception. For this reason, we recommend
// that you wait one second between calls to GetRecords; however, it's possible
// that the application will get exceptions for longer than 1 second.
//
// To detect whether the application is falling behind in processing, you can
// use the MillisBehindLatest response attribute. You can also monitor the stream
// using CloudWatch metrics and other mechanisms (see Monitoring (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html)
// in the Amazon Kinesis Streams Developer Guide).
//
// Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp,
// that is set when a stream successfully receives and stores a record. This
// is commonly referred to as a server-side timestamp, whereas a client-side
// timestamp is set when a data producer creates or sends the record to a stream
// (a data producer is any data source putting data records into a stream, for
// example with PutRecords). The timestamp has millisecond precision. There
// are no guarantees about the timestamp accuracy, or that the timestamp is
// always increasing. For example, records in a shard or across a stream might
// have timestamps that are out of order.
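//
// A minimal read-loop sketch, assuming an already-constructed *Kinesis client
// named client plus string variables name and shardID identifying the stream
// and shard, might look like:
//
//    iterType := "TRIM_HORIZON"
//    itOut, err := client.GetShardIterator(&GetShardIteratorInput{
//        StreamName:        &name,
//        ShardId:           &shardID,
//        ShardIteratorType: &iterType,
//    })
//    if err != nil {
//        return
//    }
//    it := itOut.ShardIterator
//    for it != nil {
//        out, err := client.GetRecords(&GetRecordsInput{ShardIterator: it})
//        if err != nil {
//            break
//        }
//        fmt.Println(len(out.Records), "records")
//        it = out.NextShardIterator
//        time.Sleep(time.Second) // stay under the per-shard read limits
//    }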
func (c *Kinesis) GetRecords(input *GetRecordsInput) (*GetRecordsOutput, error) {
    req, out := c.GetRecordsRequest(input)
    err := req.Send()
    return out, err
}
const opGetShardIterator = "GetShardIterator"
// GetShardIteratorRequest generates a "aws/request.Request" representing the
// client's request for the GetShardIterator operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the GetShardIterator method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetShardIteratorRequest method.
//    req, resp := client.GetShardIteratorRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) GetShardIteratorRequest(input *GetShardIteratorInput) (req *request.Request, output *GetShardIteratorOutput) {
    op := &request.Operation{
        Name:       opGetShardIterator,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &GetShardIteratorInput{}
    }

    req = c.newRequest(op, input, output)
    output = &GetShardIteratorOutput{}
    req.Data = output
    return
}
// Gets an Amazon Kinesis shard iterator. A shard iterator expires five minutes
// after it is returned to the requester.
//
// A shard iterator specifies the shard position from which to start reading
// data records sequentially. The position is specified using the sequence number
// of a data record in a shard. A sequence number is the identifier associated
// with every record ingested in the stream, and is assigned when a record is
// put into the stream. Each stream has one or more shards.
//
// You must specify the shard iterator type. For example, you can set the ShardIteratorType
// parameter to read exactly from the position denoted by a specific sequence
// number by using the AT_SEQUENCE_NUMBER shard iterator type, or right after
// the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type,
// using sequence numbers returned by earlier calls to PutRecord, PutRecords,
// GetRecords, or DescribeStream. In the request, you can specify the shard
// iterator type AT_TIMESTAMP to read records from an arbitrary point in time,
// TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record
// in the shard in the system (the oldest data record in the shard), or LATEST
// so that you always read the most recent data in the shard.
//
// When you read repeatedly from a stream, use a GetShardIterator request to
// get the first shard iterator for use in your first GetRecords request and
// for subsequent reads use the shard iterator returned by the GetRecords request
// in NextShardIterator. A new shard iterator is returned by every GetRecords
// request in NextShardIterator, which you use in the ShardIterator parameter
// of the next GetRecords request.
//
// If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException.
// For more information about throughput limits, see GetRecords, and Streams
// Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
// in the Amazon Kinesis Streams Developer Guide.
//
// If the shard is closed, GetShardIterator returns a valid iterator for the
// last sequence number of the shard. Note that a shard can be closed as a result
// of using SplitShard or MergeShards.
//
// GetShardIterator has a limit of 5 transactions per second per account per
// open shard.
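//
// A minimal usage sketch, assuming an already-constructed *Kinesis client named
// client plus string variables name, shardID, and seq (a sequence number from
// an earlier put or read), might look like:
//
//    iterType := "AT_SEQUENCE_NUMBER"
//    out, err := client.GetShardIterator(&GetShardIteratorInput{
//        StreamName:             &name,
//        ShardId:                &shardID,
//        ShardIteratorType:      &iterType,
//        StartingSequenceNumber: &seq,
//    })
//    if err == nil {
//        fmt.Println(*out.ShardIterator) // pass this iterator to GetRecords
//    }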
func (c *Kinesis) GetShardIterator(input *GetShardIteratorInput) (*GetShardIteratorOutput, error) {
    req, out := c.GetShardIteratorRequest(input)
    err := req.Send()
    return out, err
}
const opIncreaseStreamRetentionPeriod = "IncreaseStreamRetentionPeriod"
// IncreaseStreamRetentionPeriodRequest generates a "aws/request.Request" representing the
// client's request for the IncreaseStreamRetentionPeriod operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the IncreaseStreamRetentionPeriod method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the IncreaseStreamRetentionPeriodRequest method.
//    req, resp := client.IncreaseStreamRetentionPeriodRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) IncreaseStreamRetentionPeriodRequest(input *IncreaseStreamRetentionPeriodInput) (req *request.Request, output *IncreaseStreamRetentionPeriodOutput) {
    op := &request.Operation{
        Name:       opIncreaseStreamRetentionPeriod,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &IncreaseStreamRetentionPeriodInput{}
    }

    req = c.newRequest(op, input, output)
    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    output = &IncreaseStreamRetentionPeriodOutput{}
    req.Data = output
    return
}
// Increases the Amazon Kinesis stream's retention period, which is the length
// of time data records are accessible after they are added to the stream. The
// maximum value of a stream's retention period is 168 hours (7 days).
//
// Upon choosing a longer stream retention period, this operation increases
// the time period during which records that have not yet expired are accessible.
// However, it does not make previously expired data (older than the stream's
// previous retention period) accessible after the operation is called. For
// example, if a stream's retention period is set to 24 hours and is increased
// to 168 hours, any data that is older than 24 hours remains inaccessible to
// consumer applications.
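//
// A minimal usage sketch, assuming an already-constructed *Kinesis client named
// client, a string variable name holding the stream name, and a RetentionPeriodHours
// field on the input shape, might look like:
//
//    hours := int64(168)
//    _, err := client.IncreaseStreamRetentionPeriod(&IncreaseStreamRetentionPeriodInput{
//        StreamName:           &name,
//        RetentionPeriodHours: &hours,
//    })
//    if err != nil {
//        fmt.Println(err)
//    }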
func (c *Kinesis) IncreaseStreamRetentionPeriod(input *IncreaseStreamRetentionPeriodInput) (*IncreaseStreamRetentionPeriodOutput, error) {
    req, out := c.IncreaseStreamRetentionPeriodRequest(input)
    err := req.Send()
    return out, err
}
const opListStreams = "ListStreams"
// ListStreamsRequest generates a "aws/request.Request" representing the
// client's request for the ListStreams operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the ListStreams method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the ListStreamsRequest method.
//    req, resp := client.ListStreamsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) ListStreamsRequest(input *ListStreamsInput) (req *request.Request, output *ListStreamsOutput) {
    op := &request.Operation{
        Name:       opListStreams,
        HTTPMethod: "POST",
        HTTPPath:   "/",
        Paginator: &request.Paginator{
            InputTokens:     []string{"ExclusiveStartStreamName"},
            OutputTokens:    []string{"StreamNames[-1]"},
            LimitToken:      "Limit",
            TruncationToken: "HasMoreStreams",
        },
    }

    if input == nil {
        input = &ListStreamsInput{}
    }

    req = c.newRequest(op, input, output)
    output = &ListStreamsOutput{}
    req.Data = output
    return
}
// Lists your Amazon Kinesis streams.
//
// The number of streams may be too large to return from a single call to ListStreams.
// You can limit the number of returned streams using the Limit parameter. If
// you do not specify a value for the Limit parameter, Amazon Kinesis uses the
// default limit, which is currently 10.
//
// You can detect if there are more streams available to list by using the
// HasMoreStreams flag from the returned output. If there are more streams available,
// you can request more streams by using the name of the last stream returned
// by the ListStreams request in the ExclusiveStartStreamName parameter in a
// subsequent request to ListStreams. The group of stream names returned by
// the subsequent request is then added to the list. You can continue this process
// until all the stream names have been collected in the list.
//
// ListStreams has a limit of 5 transactions per second per account.
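//
// A minimal pagination sketch, assuming an already-constructed *Kinesis client
// named client, might collect all stream names as follows (ListStreamsPages
// below wraps the same loop):
//
//    var names []*string
//    var start *string
//    for {
//        out, err := client.ListStreams(&ListStreamsInput{ExclusiveStartStreamName: start})
//        if err != nil {
//            break
//        }
//        names = append(names, out.StreamNames...)
//        if !*out.HasMoreStreams || len(out.StreamNames) == 0 {
//            break
//        }
//        start = out.StreamNames[len(out.StreamNames)-1]
//    }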
func (c *Kinesis) ListStreams(input *ListStreamsInput) (*ListStreamsOutput, error) {
    req, out := c.ListStreamsRequest(input)
    err := req.Send()
    return out, err
}
// ListStreamsPages iterates over the pages of a ListStreams operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListStreams method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
//    // Example iterating over at most 3 pages of a ListStreams operation.
//    pageNum := 0
//    err := client.ListStreamsPages(params,
//        func(page *ListStreamsOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(page)
//            return pageNum <= 3
//        })
//
func (c *Kinesis) ListStreamsPages(input *ListStreamsInput, fn func(p *ListStreamsOutput, lastPage bool) (shouldContinue bool)) error {
    page, _ := c.ListStreamsRequest(input)
    page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
    return page.EachPage(func(p interface{}, lastPage bool) bool {
        return fn(p.(*ListStreamsOutput), lastPage)
    })
}
const opListTagsForStream = "ListTagsForStream"
// ListTagsForStreamRequest generates a "aws/request.Request" representing the
// client's request for the ListTagsForStream operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the ListTagsForStream method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the ListTagsForStreamRequest method.
//    req, resp := client.ListTagsForStreamRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) ListTagsForStreamRequest(input *ListTagsForStreamInput) (req *request.Request, output *ListTagsForStreamOutput) {
    op := &request.Operation{
        Name:       opListTagsForStream,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &ListTagsForStreamInput{}
    }

    req = c.newRequest(op, input, output)
    output = &ListTagsForStreamOutput{}
    req.Data = output
    return
}
// Lists the tags for the specified Amazon Kinesis stream.
func (c *Kinesis) ListTagsForStream(input *ListTagsForStreamInput) (*ListTagsForStreamOutput, error) {
    req, out := c.ListTagsForStreamRequest(input)
    err := req.Send()
    return out, err
}
const opMergeShards = "MergeShards"
// MergeShardsRequest generates a "aws/request.Request" representing the
// client's request for the MergeShards operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the MergeShards method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the MergeShardsRequest method.
//    req, resp := client.MergeShardsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) MergeShardsRequest(input *MergeShardsInput) (req *request.Request, output *MergeShardsOutput) {
    op := &request.Operation{
        Name:       opMergeShards,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &MergeShardsInput{}
    }

    req = c.newRequest(op, input, output)
    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    output = &MergeShardsOutput{}
    req.Data = output
    return
}
// Merges two adjacent shards in an Amazon Kinesis stream and combines them
// into a single shard to reduce the stream's capacity to ingest and transport
// data. Two shards are considered adjacent if the union of the hash key ranges
// for the two shards form a contiguous set with no gaps. For example, if you
// have two shards, one with a hash key range of 276...381 and the other with
// a hash key range of 382...454, then you could merge these two shards into
// a single shard that would have a hash key range of 276...454. After the merge,
// the single child shard receives data for all hash key values covered by the
// two parent shards.
//
// MergeShards is called when there is a need to reduce the overall capacity
// of a stream because of excess capacity that is not being used. You must specify
// the shard to be merged and the adjacent shard for a stream. For more information
// about merging shards, see Merge Two Shards (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html)
// in the Amazon Kinesis Streams Developer Guide.
//
// If the stream is in the ACTIVE state, you can call MergeShards. If a stream
// is in the CREATING, UPDATING, or DELETING state, MergeShards returns a ResourceInUseException.
// If the specified stream does not exist, MergeShards returns a ResourceNotFoundException.
//
// You can use DescribeStream to check the state of the stream, which is returned
// in StreamStatus.
//
// MergeShards is an asynchronous operation. Upon receiving a MergeShards request,
// Amazon Kinesis immediately returns a response and sets the StreamStatus to
// UPDATING. After the operation is completed, Amazon Kinesis sets the StreamStatus
// to ACTIVE. Read and write operations continue to work while the stream is
// in the UPDATING state.
//
// You use DescribeStream to determine the shard IDs that are specified in
// the MergeShards request.
//
// If you try to operate on too many streams in parallel using CreateStream,
// DeleteStream, MergeShards or SplitShard, you will receive a LimitExceededException.
//
// MergeShards has a limit of 5 transactions per second per account.
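//
// A minimal usage sketch, assuming an already-constructed *Kinesis client named
// client, a string variable name holding the stream name, two adjacent shard IDs
// obtained from DescribeStream, and the ShardToMerge/AdjacentShardToMerge field
// names on the input shape, might look like:
//
//    shardToMerge, adjacent := "shardId-000000000000", "shardId-000000000001"
//    _, err := client.MergeShards(&MergeShardsInput{
//        StreamName:           &name,
//        ShardToMerge:         &shardToMerge,
//        AdjacentShardToMerge: &adjacent,
//    })
//    if err != nil {
//        fmt.Println(err)
//    }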
func (c *Kinesis) MergeShards(input *MergeShardsInput) (*MergeShardsOutput, error) {
    req, out := c.MergeShardsRequest(input)
    err := req.Send()
    return out, err
}
const opPutRecord = "PutRecord"
// PutRecordRequest generates a "aws/request.Request" representing the
// client's request for the PutRecord operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutRecord method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutRecordRequest method.
//    req, resp := client.PutRecordRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, output *PutRecordOutput) {
    op := &request.Operation{
        Name:       opPutRecord,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &PutRecordInput{}
    }

    req = c.newRequest(op, input, output)
    output = &PutRecordOutput{}
    req.Data = output
    return
}
// Writes a single data record into an Amazon Kinesis stream. Call PutRecord
// to send data into the stream for real-time ingestion and subsequent processing,
// one record at a time. Each shard can support writes up to 1,000 records per
// second, up to a maximum data write total of 1 MB per second.
//
// You must specify the name of the stream that captures, stores, and transports
// the data; a partition key; and the data blob itself.
//
// The data blob can be any type of data; for example, a segment from a log
// file, geographic/location data, website clickstream data, and so on.
//
// The partition key is used by Amazon Kinesis to distribute data across shards.
// Amazon Kinesis segregates the data records that belong to a stream into multiple
// shards, using the partition key associated with each data record to determine
// which shard a given data record belongs to.
//
// Partition keys are Unicode strings, with a maximum length limit of 256 characters
// for each key. An MD5 hash function is used to map partition keys to 128-bit
// integer values and to map associated data records to shards using the hash
// key ranges of the shards. You can override hashing the partition key to determine
// the shard by explicitly specifying a hash value using the ExplicitHashKey
// parameter. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
// in the Amazon Kinesis Streams Developer Guide.
//
// PutRecord returns the shard ID of where the data record was placed and the
// sequence number that was assigned to the data record.
//
// Sequence numbers increase over time and are specific to a shard within a
// stream, not across all shards within a stream. To guarantee strictly increasing
// ordering, write serially to a shard and use the SequenceNumberForOrdering
// parameter. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
// in the Amazon Kinesis Streams Developer Guide.
//
// If a PutRecord request cannot be processed because of insufficient provisioned
// throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException.
//
// Data records are accessible for only 24 hours from the time that they are
// added to a stream.
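//
// A minimal usage sketch, assuming an already-constructed *Kinesis client named
// client and a string variable name holding the stream name, might look like:
//
//    key := "sensor-42" // records with the same partition key map to the same shard
//    out, err := client.PutRecord(&PutRecordInput{
//        StreamName:   &name,
//        Data:         []byte("payload"),
//        PartitionKey: &key,
//    })
//    if err == nil {
//        fmt.Println(*out.ShardId, *out.SequenceNumber)
//    }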
func (c *Kinesis) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) {
    req, out := c.PutRecordRequest(input)
    err := req.Send()
    return out, err
}
const opPutRecords = "PutRecords"
// PutRecordsRequest generates a "aws/request.Request" representing the
// client's request for the PutRecords operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutRecords method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutRecordsRequest method.
//    req, resp := client.PutRecordsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Request, output *PutRecordsOutput) {
    op := &request.Operation{
        Name:       opPutRecords,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &PutRecordsInput{}
    }

    req = c.newRequest(op, input, output)
    output = &PutRecordsOutput{}
    req.Data = output
    return
}
// Writes multiple data records into an Amazon Kinesis stream in a single call
// (also referred to as a PutRecords request). Use this operation to send data
// into the stream for data ingestion and processing.
//
// Each PutRecords request can support up to 500 records. Each record in the
// request can be as large as 1 MB, up to a limit of 5 MB for the entire request,
// including partition keys. Each shard can support writes up to 1,000 records
// per second, up to a maximum data write total of 1 MB per second.
//
// You must specify the name of the stream that captures, stores, and transports
// the data; and an array of request Records, with each record in the array
// requiring a partition key and data blob. The record size limit applies to
// the total size of the partition key and data blob.
//
// The data blob can be any type of data; for example, a segment from a log
// file, geographic/location data, website clickstream data, and so on.
//
// The partition key is used by Amazon Kinesis as input to a hash function
// that maps the partition key and associated data to a specific shard. An MD5
// hash function is used to map partition keys to 128-bit integer values and
// to map associated data records to shards. As a result of this hashing mechanism,
// all data records with the same partition key map to the same shard within
// the stream. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
// in the Amazon Kinesis Streams Developer Guide.
//
// Each record in the Records array may include an optional parameter, ExplicitHashKey,
// which overrides the partition key to shard mapping. This parameter allows
// a data producer to determine explicitly the shard where the record is stored.
// For more information, see Adding Multiple Records with PutRecords (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords)
// in the Amazon Kinesis Streams Developer Guide.
//
// The PutRecords response includes an array of response Records. Each record
// in the response array directly correlates with a record in the request array
// using natural ordering, from the top to the bottom of the request and response.
// The response Records array always includes the same number of records as
// the request array.
//
// The response Records array includes both successfully and unsuccessfully
// processed records. Amazon Kinesis attempts to process all records in each
// PutRecords request. A single record failure does not stop the processing
// of subsequent records.
//
// A successfully-processed record includes ShardId and SequenceNumber values.
// The ShardId parameter identifies the shard in the stream where the record
// is stored. The SequenceNumber parameter is an identifier assigned to the
// put record, unique to all records in the stream.
//
// An unsuccessfully-processed record includes ErrorCode and ErrorMessage values.
// ErrorCode reflects the type of error and can be one of the following values:
// ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides
// more detailed information about the ProvisionedThroughputExceededException
// exception including the account ID, stream name, and shard ID of the record
// that was throttled. For more information about partially successful responses,
// see Adding Multiple Records with PutRecords (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords)
// in the Amazon Kinesis Streams Developer Guide.
//
// By default, data records are accessible for only 24 hours from the time
// that they are added to an Amazon Kinesis stream. This retention period can
// be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod
// operations.
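//
// A minimal usage sketch, assuming an already-constructed *Kinesis client named
// client, a string variable name holding the stream name, and the
// PutRecordsRequestEntry shape defined elsewhere in this package, might batch
// two records and check for partial failures:
//
//    var entries []*PutRecordsRequestEntry
//    for i, payload := range []string{"first", "second"} {
//        key := fmt.Sprintf("key-%d", i)
//        entries = append(entries, &PutRecordsRequestEntry{Data: []byte(payload), PartitionKey: &key})
//    }
//    out, err := client.PutRecords(&PutRecordsInput{StreamName: &name, Records: entries})
//    if err == nil && out.FailedRecordCount != nil && *out.FailedRecordCount > 0 {
//        // Inspect ErrorCode/ErrorMessage on the failed entries in out.Records
//        // and retry only those entries.
//    }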
func (c *Kinesis) PutRecords(input *PutRecordsInput) (*PutRecordsOutput, error) {
    req, out := c.PutRecordsRequest(input)
    err := req.Send()
    return out, err
}
const opRemoveTagsFromStream = "RemoveTagsFromStream"
// RemoveTagsFromStreamRequest generates a "aws/request.Request" representing the
// client's request for the RemoveTagsFromStream operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the RemoveTagsFromStream method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the RemoveTagsFromStreamRequest method.
//    req, resp := client.RemoveTagsFromStreamRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) RemoveTagsFromStreamRequest(input *RemoveTagsFromStreamInput) (req *request.Request, output *RemoveTagsFromStreamOutput) {
    op := &request.Operation{
        Name:       opRemoveTagsFromStream,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &RemoveTagsFromStreamInput{}
    }

    req = c.newRequest(op, input, output)
    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    output = &RemoveTagsFromStreamOutput{}
    req.Data = output
    return
}
// Removes tags from the specified Amazon Kinesis stream. Removed tags are deleted
// and cannot be recovered after this operation successfully completes.
//
// If you specify a tag that does not exist, it is ignored.
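//
// A minimal usage sketch, assuming an already-constructed *Kinesis client named
// client, a string variable name holding the stream name, and a TagKeys field
// on the input shape, might look like:
//
//    tagKey := "Environment"
//    _, err := client.RemoveTagsFromStream(&RemoveTagsFromStreamInput{
//        StreamName: &name,
//        TagKeys:    []*string{&tagKey},
//    })
//    if err != nil {
//        fmt.Println(err)
//    }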
func (c *Kinesis) RemoveTagsFromStream(input *RemoveTagsFromStreamInput) (*RemoveTagsFromStreamOutput, error) {
    req, out := c.RemoveTagsFromStreamRequest(input)
    err := req.Send()
    return out, err
}
const opSplitShard = "SplitShard"
// SplitShardRequest generates a "aws/request.Request" representing the
// client's request for the SplitShard operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the SplitShard method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the SplitShardRequest method.
//    req, resp := client.SplitShardRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
func (c *Kinesis) SplitShardRequest(input *SplitShardInput) (req *request.Request, output *SplitShardOutput) {
    op := &request.Operation{
        Name:       opSplitShard,
        HTTPMethod: "POST",
        HTTPPath:   "/",
    }

    if input == nil {
        input = &SplitShardInput{}
    }

    req = c.newRequest(op, input, output)
    req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler)
    req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
    output = &SplitShardOutput{}
    req.Data = output
    return
}
// Splits a shard into two new shards in the Amazon Kinesis stream to increase
// the stream's capacity to ingest and transport data. SplitShard is called
// when there is a need to increase the overall capacity of a stream because
// of an expected increase in the volume of data records being ingested.
//
// You can also use SplitShard when a shard appears to be approaching its maximum
// utilization; for example, the producers sending data into the specific shard
// are suddenly sending more than previously anticipated. You can also call
// SplitShard to increase stream capacity, so that more Amazon Kinesis applications
// can simultaneously read data from the stream for real-time processing.
//
// You must specify the shard to be split and the new hash key, which is the
// position in the shard where the shard gets split in two. In many cases, the
// new hash key might simply be the average of the beginning and ending hash
// key, but it can be any hash key value in the range being mapped into the
// shard. For more information about splitting shards, see Split a Shard (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html)
// in the Amazon Kinesis Streams Developer Guide.
//
// You can use DescribeStream to determine the shard ID and hash key values
// for the ShardToSplit and NewStartingHashKey parameters that are specified
// in the SplitShard request.
//
// SplitShard is an asynchronous operation. Upon receiving a SplitShard request,
// Amazon Kinesis immediately returns a response and sets the stream status
// to UPDATING. After the operation is completed, Amazon Kinesis sets the stream
// status to ACTIVE. Read and write operations continue to work while the stream
// is in the UPDATING state.
//
// You can use DescribeStream to check the status of the stream, which is returned
// in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard.
// If a stream is in the CREATING, UPDATING, or DELETING state, DescribeStream
// returns a ResourceInUseException.
//
// If the specified stream does not exist, DescribeStream returns a ResourceNotFoundException.
// If you try to create more shards than are authorized for your account, you
// receive a LimitExceededException.
//
// For the default shard limit for an AWS account, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
// in the Amazon Kinesis Streams Developer Guide. If you need to increase this
// limit, contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
//
// If you try to operate on too many streams simultaneously using CreateStream,
// DeleteStream, MergeShards, and/or SplitShard, you receive a LimitExceededException.
//
// SplitShard has a limit of 5 transactions per second per account.
func ( c * Kinesis ) SplitShard ( input * SplitShardInput ) ( * SplitShardOutput , error ) {
req , out := c . SplitShardRequest ( input )
err := req . Send ( )
return out , err
}
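// exampleSplitShard is a minimal sketch, not part of the generated API. It
// splits a named shard at a caller-supplied hash key and assumes the stream is
// ACTIVE. In practice the new starting hash key is often the midpoint of the
// parent shard's HashKeyRange (for example, computed with math/big); here it
// is passed in precomputed. The parameter values are hypothetical.
func exampleSplitShard(c *Kinesis, streamName, shardID, newStartingHashKey string) error {
	// Confirm the stream is ACTIVE before calling SplitShard.
	desc, err := c.DescribeStream(&DescribeStreamInput{StreamName: &streamName})
	if err != nil {
		return err
	}
	if *desc.StreamDescription.StreamStatus != StreamStatusActive {
		return fmt.Errorf("stream %s is not ACTIVE", streamName)
	}
	_, err = c.SplitShard(&SplitShardInput{
		StreamName:         &streamName,
		ShardToSplit:       &shardID,
		NewStartingHashKey: &newStartingHashKey,
	})
	return err
}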
// Represents the input for AddTagsToStream.
type AddTagsToStreamInput struct {
_ struct { } ` type:"structure" `
// The name of the stream.
StreamName * string ` min:"1" type:"string" required:"true" `
// The set of key-value pairs to use to create the tags.
Tags map [ string ] * string ` min:"1" type:"map" required:"true" `
}
// String returns the string representation
func ( s AddTagsToStreamInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s AddTagsToStreamInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * AddTagsToStreamInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "AddTagsToStreamInput" }
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if s . Tags == nil {
invalidParams . Add ( request . NewErrParamRequired ( "Tags" ) )
}
if s . Tags != nil && len ( s . Tags ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "Tags" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
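// exampleValidateTagsInput is a hedged sketch, not part of the generated API.
// It shows how a generated Validate method can be called directly to surface
// missing or too-short parameters before any request is sent. The values are
// hypothetical.
func exampleValidateTagsInput() error {
	stream := "example-stream"
	// Tags is intentionally left nil here, so Validate reports a missing
	// required parameter.
	in := &AddTagsToStreamInput{StreamName: &stream}
	if err := in.Validate(); err != nil {
		return fmt.Errorf("invalid AddTagsToStream parameters: %v", err)
	}
	return nil
}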
type AddTagsToStreamOutput struct {
_ struct { } ` type:"structure" `
}
// String returns the string representation
func ( s AddTagsToStreamOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s AddTagsToStreamOutput ) GoString ( ) string {
return s . String ( )
}
// Represents the input for CreateStream.
type CreateStreamInput struct {
_ struct { } ` type:"structure" `
// The number of shards that the stream will use. The throughput of the stream
// is a function of the number of shards; more shards are required for greater
// provisioned throughput.
ShardCount * int64 ` min:"1" type:"integer" required:"true" `
// A name to identify the stream. The stream name is scoped to the AWS account
// used by the application that creates the stream. It is also scoped by region.
// That is, two streams in two different AWS accounts can have the same name,
// and two streams in the same AWS account but in two different regions can
// have the same name.
StreamName * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s CreateStreamInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s CreateStreamInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * CreateStreamInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "CreateStreamInput" }
if s . ShardCount == nil {
invalidParams . Add ( request . NewErrParamRequired ( "ShardCount" ) )
}
if s . ShardCount != nil && * s . ShardCount < 1 {
invalidParams . Add ( request . NewErrParamMinValue ( "ShardCount" , 1 ) )
}
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
type CreateStreamOutput struct {
_ struct { } ` type:"structure" `
}
// String returns the string representation
func ( s CreateStreamOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s CreateStreamOutput ) GoString ( ) string {
return s . String ( )
}
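// exampleCreateStreamAndWait is an illustrative sketch, not part of the
// generated API. It creates a stream and then polls DescribeStream until the
// stream reaches the ACTIVE state, since reads and writes should only be
// performed on an ACTIVE stream. The stream name, shard count, and polling
// interval are assumptions.
func exampleCreateStreamAndWait(c *Kinesis) error {
	name := "example-stream" // hypothetical stream name
	shards := int64(2)
	if _, err := c.CreateStream(&CreateStreamInput{
		StreamName: &name,
		ShardCount: &shards,
	}); err != nil {
		return err
	}
	for {
		out, err := c.DescribeStream(&DescribeStreamInput{StreamName: &name})
		if err != nil {
			return err
		}
		if *out.StreamDescription.StreamStatus == StreamStatusActive {
			return nil
		}
		time.Sleep(10 * time.Second) // the stream stays in CREATING until provisioning finishes
	}
}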
// Represents the input for DecreaseStreamRetentionPeriod.
type DecreaseStreamRetentionPeriodInput struct {
_ struct { } ` type:"structure" `
// The new retention period of the stream, in hours. Must be less than the current
// retention period.
RetentionPeriodHours * int64 ` min:"24" type:"integer" required:"true" `
// The name of the stream to modify.
StreamName * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s DecreaseStreamRetentionPeriodInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s DecreaseStreamRetentionPeriodInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * DecreaseStreamRetentionPeriodInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "DecreaseStreamRetentionPeriodInput" }
if s . RetentionPeriodHours == nil {
invalidParams . Add ( request . NewErrParamRequired ( "RetentionPeriodHours" ) )
}
if s . RetentionPeriodHours != nil && * s . RetentionPeriodHours < 24 {
invalidParams . Add ( request . NewErrParamMinValue ( "RetentionPeriodHours" , 24 ) )
}
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
type DecreaseStreamRetentionPeriodOutput struct {
_ struct { } ` type:"structure" `
}
// String returns the string representation
func ( s DecreaseStreamRetentionPeriodOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s DecreaseStreamRetentionPeriodOutput ) GoString ( ) string {
return s . String ( )
}
// Represents the input for DeleteStream.
type DeleteStreamInput struct {
_ struct { } ` type:"structure" `
// The name of the stream to delete.
StreamName * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s DeleteStreamInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s DeleteStreamInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * DeleteStreamInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "DeleteStreamInput" }
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
type DeleteStreamOutput struct {
_ struct { } ` type:"structure" `
}
// String returns the string representation
func ( s DeleteStreamOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s DeleteStreamOutput ) GoString ( ) string {
return s . String ( )
}
// Represents the input for DescribeStream.
type DescribeStreamInput struct {
_ struct { } ` type:"structure" `
// The shard ID of the shard to start with.
ExclusiveStartShardId * string ` min:"1" type:"string" `
// The maximum number of shards to return.
Limit * int64 ` min:"1" type:"integer" `
// The name of the stream to describe.
StreamName * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s DescribeStreamInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s DescribeStreamInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * DescribeStreamInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "DescribeStreamInput" }
if s . ExclusiveStartShardId != nil && len ( * s . ExclusiveStartShardId ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "ExclusiveStartShardId" , 1 ) )
}
if s . Limit != nil && * s . Limit < 1 {
invalidParams . Add ( request . NewErrParamMinValue ( "Limit" , 1 ) )
}
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
// Represents the output for DescribeStream.
type DescribeStreamOutput struct {
_ struct { } ` type:"structure" `
// The current status of the stream, the stream ARN, an array of shard objects
// that comprise the stream, and states whether there are more shards available.
StreamDescription * StreamDescription ` type:"structure" required:"true" `
}
// String returns the string representation
func ( s DescribeStreamOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s DescribeStreamOutput ) GoString ( ) string {
return s . String ( )
}
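// exampleListAllShards is a hedged sketch, not part of the generated API. It
// pages through DescribeStream using ExclusiveStartShardId and HasMoreShards
// to collect every shard in a stream. The stream name is supplied by the
// caller.
func exampleListAllShards(c *Kinesis, streamName string) ([]*Shard, error) {
	var shards []*Shard
	var exclusiveStart *string
	for {
		out, err := c.DescribeStream(&DescribeStreamInput{
			StreamName:            &streamName,
			ExclusiveStartShardId: exclusiveStart,
		})
		if err != nil {
			return nil, err
		}
		shards = append(shards, out.StreamDescription.Shards...)
		if !*out.StreamDescription.HasMoreShards || len(shards) == 0 {
			return shards, nil
		}
		// Continue after the last shard returned in this page.
		exclusiveStart = shards[len(shards)-1].ShardId
	}
}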
// Represents the input for DisableEnhancedMonitoring.
type DisableEnhancedMonitoringInput struct {
_ struct { } ` type:"structure" `
// List of shard-level metrics to disable.
//
// The following are the valid shard-level metrics. The value "ALL" disables
// every metric.
//
//    IncomingBytes
//    IncomingRecords
//    OutgoingBytes
//    OutgoingRecords
//    WriteProvisionedThroughputExceeded
//    ReadProvisionedThroughputExceeded
//    IteratorAgeMilliseconds
//    ALL
//
// For more information, see Monitoring the Amazon Kinesis Streams Service with
// Amazon CloudWatch (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html)
// in the Amazon Kinesis Streams Developer Guide.
ShardLevelMetrics [ ] * string ` min:"1" type:"list" required:"true" `
// The name of the Amazon Kinesis stream for which to disable enhanced monitoring.
StreamName * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s DisableEnhancedMonitoringInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s DisableEnhancedMonitoringInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * DisableEnhancedMonitoringInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "DisableEnhancedMonitoringInput" }
if s . ShardLevelMetrics == nil {
invalidParams . Add ( request . NewErrParamRequired ( "ShardLevelMetrics" ) )
}
if s . ShardLevelMetrics != nil && len ( s . ShardLevelMetrics ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "ShardLevelMetrics" , 1 ) )
}
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
// Represents the input for EnableEnhancedMonitoring.
type EnableEnhancedMonitoringInput struct {
_ struct { } ` type:"structure" `
// List of shard-level metrics to enable.
//
// The following are the valid shard-level metrics. The value "ALL" enables
// every metric.
//
//    IncomingBytes
//    IncomingRecords
//    OutgoingBytes
//    OutgoingRecords
//    WriteProvisionedThroughputExceeded
//    ReadProvisionedThroughputExceeded
//    IteratorAgeMilliseconds
//    ALL
//
// For more information, see Monitoring the Amazon Kinesis Streams Service with
// Amazon CloudWatch (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html)
// in the Amazon Kinesis Streams Developer Guide.
ShardLevelMetrics [ ] * string ` min:"1" type:"list" required:"true" `
// The name of the stream for which to enable enhanced monitoring.
StreamName * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s EnableEnhancedMonitoringInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s EnableEnhancedMonitoringInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * EnableEnhancedMonitoringInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "EnableEnhancedMonitoringInput" }
if s . ShardLevelMetrics == nil {
invalidParams . Add ( request . NewErrParamRequired ( "ShardLevelMetrics" ) )
}
if s . ShardLevelMetrics != nil && len ( s . ShardLevelMetrics ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "ShardLevelMetrics" , 1 ) )
}
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
// Represents enhanced metrics types.
type EnhancedMetrics struct {
_ struct { } ` type:"structure" `
// List of shard-level metrics.
//
// The following are the valid shard-level metrics. The value "ALL" enhances
// every metric.
//
//    IncomingBytes
//    IncomingRecords
//    OutgoingBytes
//    OutgoingRecords
//    WriteProvisionedThroughputExceeded
//    ReadProvisionedThroughputExceeded
//    IteratorAgeMilliseconds
//    ALL
//
// For more information, see Monitoring the Amazon Kinesis Streams Service with
// Amazon CloudWatch (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html)
// in the Amazon Kinesis Streams Developer Guide.
ShardLevelMetrics [ ] * string ` min:"1" type:"list" `
}
// String returns the string representation
func ( s EnhancedMetrics ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s EnhancedMetrics ) GoString ( ) string {
return s . String ( )
}
// Represents the output for EnableEnhancedMonitoring and DisableEnhancedMonitoring.
type EnhancedMonitoringOutput struct {
_ struct { } ` type:"structure" `
// Represents the current state of the metrics that are in the enhanced state
// before the operation.
CurrentShardLevelMetrics [ ] * string ` min:"1" type:"list" `
// Represents the list of all the metrics that would be in the enhanced state
// after the operation.
DesiredShardLevelMetrics [ ] * string ` min:"1" type:"list" `
// The name of the Amazon Kinesis stream.
StreamName * string ` min:"1" type:"string" `
}
// String returns the string representation
func ( s EnhancedMonitoringOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s EnhancedMonitoringOutput ) GoString ( ) string {
return s . String ( )
}
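// exampleEnableShardMetrics is an illustrative sketch, not part of the
// generated API. It enables two shard-level CloudWatch metrics for a stream
// using the MetricsName constants defined later in this file and prints the
// metrics that will be in effect after the change. The stream name is
// supplied by the caller.
func exampleEnableShardMetrics(c *Kinesis, streamName string) error {
	incoming := MetricsNameIncomingBytes
	iterAge := MetricsNameIteratorAgeMilliseconds
	out, err := c.EnableEnhancedMonitoring(&EnableEnhancedMonitoringInput{
		StreamName:        &streamName,
		ShardLevelMetrics: []*string{&incoming, &iterAge},
	})
	if err != nil {
		return err
	}
	// DesiredShardLevelMetrics lists every metric enabled after the operation.
	for _, m := range out.DesiredShardLevelMetrics {
		fmt.Println(*m)
	}
	return nil
}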
// Represents the input for GetRecords.
type GetRecordsInput struct {
_ struct { } ` type:"structure" `
// The maximum number of records to return. Specify a value of up to 10,000.
// If you specify a value that is greater than 10,000, GetRecords throws InvalidArgumentException.
Limit * int64 ` min:"1" type:"integer" `
// The position in the shard from which you want to start sequentially reading
// data records. A shard iterator specifies this position using the sequence
// number of a data record in the shard.
ShardIterator * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s GetRecordsInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s GetRecordsInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * GetRecordsInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "GetRecordsInput" }
if s . Limit != nil && * s . Limit < 1 {
invalidParams . Add ( request . NewErrParamMinValue ( "Limit" , 1 ) )
}
if s . ShardIterator == nil {
invalidParams . Add ( request . NewErrParamRequired ( "ShardIterator" ) )
}
if s . ShardIterator != nil && len ( * s . ShardIterator ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "ShardIterator" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
// Represents the output for GetRecords.
type GetRecordsOutput struct {
_ struct { } ` type:"structure" `
// The number of milliseconds the GetRecords response is from the tip of the
// stream, indicating how far behind current time the consumer is. A value of
// zero indicates record processing is caught up, and there are no new records
// to process at this moment.
MillisBehindLatest * int64 ` type:"long" `
// The next position in the shard from which to start sequentially reading data
// records. If set to null, the shard has been closed and the requested iterator
// will not return any more data.
NextShardIterator * string ` min:"1" type:"string" `
// The data records retrieved from the shard.
Records [ ] * Record ` type:"list" required:"true" `
}
// String returns the string representation
func ( s GetRecordsOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s GetRecordsOutput ) GoString ( ) string {
return s . String ( )
}
// Represents the input for GetShardIterator.
type GetShardIteratorInput struct {
_ struct { } ` type:"structure" `
// The shard ID of the Amazon Kinesis shard to get the iterator for.
ShardId * string ` min:"1" type:"string" required:"true" `
// Determines how the shard iterator is used to start reading data records from
// the shard.
//
// The following are the valid Amazon Kinesis shard iterator types:
//
//    AT_SEQUENCE_NUMBER - Start reading from the position denoted by a specific
//    sequence number, provided in the value StartingSequenceNumber.
//
//    AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by
//    a specific sequence number, provided in the value StartingSequenceNumber.
//
//    AT_TIMESTAMP - Start reading from the position denoted by a specific
//    timestamp, provided in the value Timestamp.
//
//    TRIM_HORIZON - Start reading at the last untrimmed record in the shard in
//    the system, which is the oldest data record in the shard.
//
//    LATEST - Start reading just after the most recent record in the shard, so
//    that you always read the most recent data in the shard.
ShardIteratorType * string ` type:"string" required:"true" enum:"ShardIteratorType" `
// The sequence number of the data record in the shard from which to start reading.
// Used with shard iterator type AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER.
StartingSequenceNumber * string ` type:"string" `
// The name of the Amazon Kinesis stream.
StreamName * string ` min:"1" type:"string" required:"true" `
// The timestamp of the data record from which to start reading. Used with shard
// iterator type AT_TIMESTAMP. A timestamp is the Unix epoch date with precision
// in milliseconds. For example, 2016-04-04T19:58:46.480-00:00 or 1459799926.480.
// If a record with this exact timestamp does not exist, the iterator returned
// is for the next (later) record. If the timestamp is older than the current
// trim horizon, the iterator returned is for the oldest untrimmed data record
// (TRIM_HORIZON).
Timestamp * time . Time ` type:"timestamp" timestampFormat:"unix" `
}
// String returns the string representation
func ( s GetShardIteratorInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s GetShardIteratorInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * GetShardIteratorInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "GetShardIteratorInput" }
if s . ShardId == nil {
invalidParams . Add ( request . NewErrParamRequired ( "ShardId" ) )
}
if s . ShardId != nil && len ( * s . ShardId ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "ShardId" , 1 ) )
}
if s . ShardIteratorType == nil {
invalidParams . Add ( request . NewErrParamRequired ( "ShardIteratorType" ) )
}
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
// Represents the output for GetShardIterator.
type GetShardIteratorOutput struct {
_ struct { } ` type:"structure" `
// The position in the shard from which to start reading data records sequentially.
// A shard iterator specifies this position using the sequence number of a data
// record in a shard.
ShardIterator * string ` min:"1" type:"string" `
}
// String returns the string representation
func ( s GetShardIteratorOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s GetShardIteratorOutput ) GoString ( ) string {
return s . String ( )
}
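// exampleReadShard is a hedged sketch, not part of the generated API. It
// obtains a TRIM_HORIZON iterator for one shard and then follows
// NextShardIterator, stopping once the shard is closed or the reader has
// caught up to the tip of the stream. Record handling is reduced to a print,
// and the polling interval is an assumption.
func exampleReadShard(c *Kinesis, streamName, shardID string) error {
	iterType := ShardIteratorTypeTrimHorizon
	itOut, err := c.GetShardIterator(&GetShardIteratorInput{
		StreamName:        &streamName,
		ShardId:           &shardID,
		ShardIteratorType: &iterType,
	})
	if err != nil {
		return err
	}
	iterator := itOut.ShardIterator
	for iterator != nil {
		recOut, err := c.GetRecords(&GetRecordsInput{ShardIterator: iterator})
		if err != nil {
			return err
		}
		for _, r := range recOut.Records {
			fmt.Printf("seq=%s bytes=%d\n", *r.SequenceNumber, len(r.Data))
		}
		// A zero MillisBehindLatest means the reader is caught up.
		if recOut.MillisBehindLatest != nil && *recOut.MillisBehindLatest == 0 {
			return nil
		}
		iterator = recOut.NextShardIterator // nil once the shard has been closed
		time.Sleep(time.Second)
	}
	return nil
}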
// The range of possible hash key values for the shard, which is a set of ordered
// contiguous positive integers.
type HashKeyRange struct {
_ struct { } ` type:"structure" `
// The ending hash key of the hash key range.
EndingHashKey * string ` type:"string" required:"true" `
// The starting hash key of the hash key range.
StartingHashKey * string ` type:"string" required:"true" `
}
// String returns the string representation
func ( s HashKeyRange ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s HashKeyRange ) GoString ( ) string {
return s . String ( )
}
// Represents the input for IncreaseStreamRetentionPeriod.
type IncreaseStreamRetentionPeriodInput struct {
_ struct { } ` type:"structure" `
// The new retention period of the stream, in hours. Must be more than the current
// retention period.
RetentionPeriodHours * int64 ` min:"24" type:"integer" required:"true" `
// The name of the stream to modify.
StreamName * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s IncreaseStreamRetentionPeriodInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s IncreaseStreamRetentionPeriodInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * IncreaseStreamRetentionPeriodInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "IncreaseStreamRetentionPeriodInput" }
if s . RetentionPeriodHours == nil {
invalidParams . Add ( request . NewErrParamRequired ( "RetentionPeriodHours" ) )
}
if s . RetentionPeriodHours != nil && * s . RetentionPeriodHours < 24 {
invalidParams . Add ( request . NewErrParamMinValue ( "RetentionPeriodHours" , 24 ) )
}
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
type IncreaseStreamRetentionPeriodOutput struct {
_ struct { } ` type:"structure" `
}
// String returns the string representation
func ( s IncreaseStreamRetentionPeriodOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s IncreaseStreamRetentionPeriodOutput ) GoString ( ) string {
return s . String ( )
}
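// exampleAdjustRetention is an illustrative sketch, not part of the generated
// API. It raises a stream's retention period and shows the matching call for
// lowering it again; both operations take the new value in hours and require
// at least 24. The stream name and the 48-hour value are assumptions.
func exampleAdjustRetention(c *Kinesis, streamName string) error {
	hours := int64(48)
	if _, err := c.IncreaseStreamRetentionPeriod(&IncreaseStreamRetentionPeriodInput{
		StreamName:           &streamName,
		RetentionPeriodHours: &hours,
	}); err != nil {
		return err
	}
	// Later, the retention period can be lowered again (never below 24 hours).
	lower := int64(24)
	_, err := c.DecreaseStreamRetentionPeriod(&DecreaseStreamRetentionPeriodInput{
		StreamName:           &streamName,
		RetentionPeriodHours: &lower,
	})
	return err
}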
// Represents the input for ListStreams.
type ListStreamsInput struct {
_ struct { } ` type:"structure" `
// The name of the stream to start the list with.
ExclusiveStartStreamName * string ` min:"1" type:"string" `
// The maximum number of streams to list.
Limit * int64 ` min:"1" type:"integer" `
}
// String returns the string representation
func ( s ListStreamsInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s ListStreamsInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * ListStreamsInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "ListStreamsInput" }
if s . ExclusiveStartStreamName != nil && len ( * s . ExclusiveStartStreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "ExclusiveStartStreamName" , 1 ) )
}
if s . Limit != nil && * s . Limit < 1 {
invalidParams . Add ( request . NewErrParamMinValue ( "Limit" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
// Represents the output for ListStreams.
type ListStreamsOutput struct {
_ struct { } ` type:"structure" `
// If set to true, there are more streams available to list.
HasMoreStreams * bool ` type:"boolean" required:"true" `
// The names of the streams that are associated with the AWS account making
// the ListStreams request.
StreamNames [ ] * string ` type:"list" required:"true" `
}
// String returns the string representation
func ( s ListStreamsOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s ListStreamsOutput ) GoString ( ) string {
return s . String ( )
}
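// exampleListAllStreams is a hedged sketch, not part of the generated API. It
// pages through ListStreams using ExclusiveStartStreamName and HasMoreStreams
// to collect the names of every stream owned by the account.
func exampleListAllStreams(c *Kinesis) ([]string, error) {
	var names []string
	var exclusiveStart *string
	for {
		out, err := c.ListStreams(&ListStreamsInput{
			ExclusiveStartStreamName: exclusiveStart,
		})
		if err != nil {
			return nil, err
		}
		for _, n := range out.StreamNames {
			names = append(names, *n)
		}
		if !*out.HasMoreStreams || len(out.StreamNames) == 0 {
			return names, nil
		}
		// Continue after the last stream name returned in this page.
		exclusiveStart = out.StreamNames[len(out.StreamNames)-1]
	}
}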
// Represents the input for ListTagsForStream.
type ListTagsForStreamInput struct {
_ struct { } ` type:"structure" `
// The key to use as the starting point for the list of tags. If this parameter
// is set, ListTagsForStream gets all tags that occur after ExclusiveStartTagKey.
ExclusiveStartTagKey * string ` min:"1" type:"string" `
// The number of tags to return. If this number is less than the total number
// of tags associated with the stream, HasMoreTags is set to true. To list additional
// tags, set ExclusiveStartTagKey to the last key in the response.
Limit * int64 ` min:"1" type:"integer" `
// The name of the stream.
StreamName * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s ListTagsForStreamInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s ListTagsForStreamInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * ListTagsForStreamInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "ListTagsForStreamInput" }
if s . ExclusiveStartTagKey != nil && len ( * s . ExclusiveStartTagKey ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "ExclusiveStartTagKey" , 1 ) )
}
if s . Limit != nil && * s . Limit < 1 {
invalidParams . Add ( request . NewErrParamMinValue ( "Limit" , 1 ) )
}
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
// Represents the output for ListTagsForStream.
type ListTagsForStreamOutput struct {
_ struct { } ` type:"structure" `
// If set to true, more tags are available. To request additional tags, set
// ExclusiveStartTagKey to the key of the last tag returned.
HasMoreTags * bool ` type:"boolean" required:"true" `
// A list of tags associated with StreamName, starting with the first tag after
// ExclusiveStartTagKey and up to the specified Limit.
Tags [ ] * Tag ` type:"list" required:"true" `
}
// String returns the string representation
func ( s ListTagsForStreamOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s ListTagsForStreamOutput ) GoString ( ) string {
return s . String ( )
}
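// exampleListAllTags is a hedged sketch, not part of the generated API. It
// pages through ListTagsForStream, feeding the key of the last tag returned
// back in as ExclusiveStartTagKey while HasMoreTags is true. The stream name
// is supplied by the caller.
func exampleListAllTags(c *Kinesis, streamName string) (map[string]string, error) {
	tags := map[string]string{}
	var exclusiveStart *string
	for {
		out, err := c.ListTagsForStream(&ListTagsForStreamInput{
			StreamName:           &streamName,
			ExclusiveStartTagKey: exclusiveStart,
		})
		if err != nil {
			return nil, err
		}
		for _, t := range out.Tags {
			v := ""
			if t.Value != nil {
				v = *t.Value
			}
			tags[*t.Key] = v
		}
		if !*out.HasMoreTags || len(out.Tags) == 0 {
			return tags, nil
		}
		exclusiveStart = out.Tags[len(out.Tags)-1].Key
	}
}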
// Represents the input for MergeShards.
type MergeShardsInput struct {
_ struct { } ` type:"structure" `
// The shard ID of the adjacent shard for the merge.
AdjacentShardToMerge * string ` min:"1" type:"string" required:"true" `
// The shard ID of the shard to combine with the adjacent shard for the merge.
ShardToMerge * string ` min:"1" type:"string" required:"true" `
// The name of the stream for the merge.
StreamName * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s MergeShardsInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s MergeShardsInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * MergeShardsInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "MergeShardsInput" }
if s . AdjacentShardToMerge == nil {
invalidParams . Add ( request . NewErrParamRequired ( "AdjacentShardToMerge" ) )
}
if s . AdjacentShardToMerge != nil && len ( * s . AdjacentShardToMerge ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "AdjacentShardToMerge" , 1 ) )
}
if s . ShardToMerge == nil {
invalidParams . Add ( request . NewErrParamRequired ( "ShardToMerge" ) )
}
if s . ShardToMerge != nil && len ( * s . ShardToMerge ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "ShardToMerge" , 1 ) )
}
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
type MergeShardsOutput struct {
_ struct { } ` type:"structure" `
}
// String returns the string representation
func ( s MergeShardsOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s MergeShardsOutput ) GoString ( ) string {
return s . String ( )
}
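// exampleMergeAdjacentShards is a minimal sketch, not part of the generated
// API. It merges two adjacent shards into one; the caller is expected to have
// determined, for example via DescribeStream, that the two shards cover
// adjacent hash key ranges. The identifiers are hypothetical.
func exampleMergeAdjacentShards(c *Kinesis, streamName, shardID, adjacentShardID string) error {
	_, err := c.MergeShards(&MergeShardsInput{
		StreamName:           &streamName,
		ShardToMerge:         &shardID,
		AdjacentShardToMerge: &adjacentShardID,
	})
	return err
}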
// Represents the input for PutRecord.
type PutRecordInput struct {
_ struct { } ` type:"structure" `
// The data blob to put into the record, which is base64-encoded when the blob
// is serialized. When the data blob (the payload before base64-encoding) is
// added to the partition key size, the total size must not exceed the maximum
// record size (1 MB).
//
// Data is automatically base64 encoded/decoded by the SDK.
Data [ ] byte ` type:"blob" required:"true" `
// The hash value used to explicitly determine the shard the data record is
// assigned to by overriding the partition key hash.
ExplicitHashKey * string ` type:"string" `
// Determines which shard in the stream the data record is assigned to. Partition
// keys are Unicode strings with a maximum length limit of 256 characters for
// each key. Amazon Kinesis uses the partition key as input to a hash function
// that maps the partition key and associated data to a specific shard. Specifically,
// an MD5 hash function is used to map partition keys to 128-bit integer values
// and to map associated data records to shards. As a result of this hashing
// mechanism, all data records with the same partition key map to the same shard
// within the stream.
PartitionKey * string ` min:"1" type:"string" required:"true" `
// Guarantees strictly increasing sequence numbers, for puts from the same client
// and to the same partition key. Usage: set the SequenceNumberForOrdering of
// record n to the sequence number of record n-1 (as returned in the result
// when putting record n-1). If this parameter is not set, records will be coarsely
// ordered based on arrival time.
SequenceNumberForOrdering * string ` type:"string" `
// The name of the stream to put the data record into.
StreamName * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s PutRecordInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s PutRecordInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * PutRecordInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "PutRecordInput" }
if s . Data == nil {
invalidParams . Add ( request . NewErrParamRequired ( "Data" ) )
}
if s . PartitionKey == nil {
invalidParams . Add ( request . NewErrParamRequired ( "PartitionKey" ) )
}
if s . PartitionKey != nil && len ( * s . PartitionKey ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "PartitionKey" , 1 ) )
}
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
// Represents the output for PutRecord.
type PutRecordOutput struct {
_ struct { } ` type:"structure" `
// The sequence number identifier that was assigned to the put data record.
// The sequence number for the record is unique across all records in the stream.
// A sequence number is the identifier associated with every record put into
// the stream.
SequenceNumber * string ` type:"string" required:"true" `
// The shard ID of the shard where the data record was placed.
ShardId * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s PutRecordOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s PutRecordOutput ) GoString ( ) string {
return s . String ( )
}
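// examplePutOrderedRecords is an illustrative sketch, not part of the
// generated API. It writes two records with the same partition key and chains
// SequenceNumberForOrdering so the second record receives a higher sequence
// number than the first, as described above. The stream name, partition key,
// and payloads are assumptions.
func examplePutOrderedRecords(c *Kinesis, streamName string) error {
	key := "device-42" // hypothetical partition key
	first, err := c.PutRecord(&PutRecordInput{
		StreamName:   &streamName,
		PartitionKey: &key,
		Data:         []byte("first payload"),
	})
	if err != nil {
		return err
	}
	// Pass the previous record's sequence number to order the next put.
	_, err = c.PutRecord(&PutRecordInput{
		StreamName:                &streamName,
		PartitionKey:              &key,
		Data:                      []byte("second payload"),
		SequenceNumberForOrdering: first.SequenceNumber,
	})
	return err
}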
// A PutRecords request.
type PutRecordsInput struct {
_ struct { } ` type:"structure" `
// The records associated with the request.
Records [ ] * PutRecordsRequestEntry ` min:"1" type:"list" required:"true" `
// The stream name associated with the request.
StreamName * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s PutRecordsInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s PutRecordsInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * PutRecordsInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "PutRecordsInput" }
if s . Records == nil {
invalidParams . Add ( request . NewErrParamRequired ( "Records" ) )
}
if s . Records != nil && len ( s . Records ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "Records" , 1 ) )
}
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if s . Records != nil {
for i , v := range s . Records {
if v == nil {
continue
}
if err := v . Validate ( ) ; err != nil {
invalidParams . AddNested ( fmt . Sprintf ( "%s[%v]" , "Records" , i ) , err . ( request . ErrInvalidParams ) )
}
}
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
// PutRecords results.
type PutRecordsOutput struct {
_ struct { } ` type:"structure" `
// The number of unsuccessfully processed records in a PutRecords request.
FailedRecordCount * int64 ` min:"1" type:"integer" `
// An array of successfully and unsuccessfully processed record results, correlated
// with the request by natural ordering. A record that is successfully added
// to a stream includes SequenceNumber and ShardId in the result. A record that
// fails to be added to a stream includes ErrorCode and ErrorMessage in the
// result.
Records [ ] * PutRecordsResultEntry ` min:"1" type:"list" required:"true" `
}
// String returns the string representation
func ( s PutRecordsOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s PutRecordsOutput ) GoString ( ) string {
return s . String ( )
}
// Represents the output for PutRecords.
type PutRecordsRequestEntry struct {
_ struct { } ` type:"structure" `
// The data blob to put into the record, which is base64-encoded when the blob
// is serialized. When the data blob (the payload before base64-encoding) is
// added to the partition key size, the total size must not exceed the maximum
// record size (1 MB).
//
// Data is automatically base64 encoded/decoded by the SDK.
Data [ ] byte ` type:"blob" required:"true" `
// The hash value used to determine explicitly the shard that the data record
// is assigned to by overriding the partition key hash.
ExplicitHashKey * string ` type:"string" `
// Determines which shard in the stream the data record is assigned to. Partition
// keys are Unicode strings with a maximum length limit of 256 characters for
// each key. Amazon Kinesis uses the partition key as input to a hash function
// that maps the partition key and associated data to a specific shard. Specifically,
// an MD5 hash function is used to map partition keys to 128-bit integer values
// and to map associated data records to shards. As a result of this hashing
// mechanism, all data records with the same partition key map to the same shard
// within the stream.
PartitionKey * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s PutRecordsRequestEntry ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s PutRecordsRequestEntry ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * PutRecordsRequestEntry ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "PutRecordsRequestEntry" }
if s . Data == nil {
invalidParams . Add ( request . NewErrParamRequired ( "Data" ) )
}
if s . PartitionKey == nil {
invalidParams . Add ( request . NewErrParamRequired ( "PartitionKey" ) )
}
if s . PartitionKey != nil && len ( * s . PartitionKey ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "PartitionKey" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
// Represents the result of an individual record from a PutRecords request.
// A record that is successfully added to a stream includes SequenceNumber and
// ShardId in the result. A record that fails to be added to the stream includes
// ErrorCode and ErrorMessage in the result.
type PutRecordsResultEntry struct {
_ struct { } ` type:"structure" `
// The error code for an individual record result. ErrorCodes can be either
// ProvisionedThroughputExceededException or InternalFailure.
ErrorCode * string ` type:"string" `
// The error message for an individual record result. An ErrorCode value of
// ProvisionedThroughputExceededException has an error message that includes
// the account ID, stream name, and shard ID. An ErrorCode value of InternalFailure
// has the error message "Internal Service Failure".
ErrorMessage * string ` type:"string" `
// The sequence number for an individual record result.
SequenceNumber * string ` type:"string" `
// The shard ID for an individual record result.
ShardId * string ` min:"1" type:"string" `
}
// String returns the string representation
func ( s PutRecordsResultEntry ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s PutRecordsResultEntry ) GoString ( ) string {
return s . String ( )
}
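// examplePutRecordsWithRetry is a hedged sketch, not part of the generated
// API. It sends a batch with PutRecords and, while FailedRecordCount is
// non-zero, rebuilds the batch from the entries whose result carries an
// ErrorCode, matching requests to results by position. Backoff is reduced to
// a fixed sleep; the stream name and entries are supplied by the caller.
func examplePutRecordsWithRetry(c *Kinesis, streamName string, entries []*PutRecordsRequestEntry) error {
	for len(entries) > 0 {
		out, err := c.PutRecords(&PutRecordsInput{
			StreamName: &streamName,
			Records:    entries,
		})
		if err != nil {
			return err
		}
		if out.FailedRecordCount == nil || *out.FailedRecordCount == 0 {
			return nil
		}
		// Results are correlated with the request by natural ordering.
		var retry []*PutRecordsRequestEntry
		for i, r := range out.Records {
			if r.ErrorCode != nil {
				retry = append(retry, entries[i])
			}
		}
		entries = retry
		time.Sleep(time.Second) // simple fixed backoff for the sketch
	}
	return nil
}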
// The unit of data of the Amazon Kinesis stream, which is composed of a sequence
// number, a partition key, and a data blob.
type Record struct {
_ struct { } ` type:"structure" `
// The approximate time that the record was inserted into the stream.
ApproximateArrivalTimestamp * time . Time ` type:"timestamp" timestampFormat:"unix" `
// The data blob. The data in the blob is both opaque and immutable to the Amazon
// Kinesis service, which does not inspect, interpret, or change the data in
// the blob in any way. When the data blob (the payload before base64-encoding)
// is added to the partition key size, the total size must not exceed the maximum
// record size (1 MB).
//
// Data is automatically base64 encoded/decoded by the SDK.
Data [ ] byte ` type:"blob" required:"true" `
// Identifies which shard in the stream the data record is assigned to.
PartitionKey * string ` min:"1" type:"string" required:"true" `
// The unique identifier of the record in the stream.
SequenceNumber * string ` type:"string" required:"true" `
}
// String returns the string representation
func ( s Record ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s Record ) GoString ( ) string {
return s . String ( )
}
// Represents the input for RemoveTagsFromStream.
type RemoveTagsFromStreamInput struct {
_ struct { } ` type:"structure" `
// The name of the stream.
StreamName * string ` min:"1" type:"string" required:"true" `
// A list of tag keys. Each corresponding tag is removed from the stream.
TagKeys [ ] * string ` min:"1" type:"list" required:"true" `
}
// String returns the string representation
func ( s RemoveTagsFromStreamInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s RemoveTagsFromStreamInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * RemoveTagsFromStreamInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "RemoveTagsFromStreamInput" }
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if s . TagKeys == nil {
invalidParams . Add ( request . NewErrParamRequired ( "TagKeys" ) )
}
if s . TagKeys != nil && len ( s . TagKeys ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "TagKeys" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
type RemoveTagsFromStreamOutput struct {
_ struct { } ` type:"structure" `
}
// String returns the string representation
func ( s RemoveTagsFromStreamOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s RemoveTagsFromStreamOutput ) GoString ( ) string {
return s . String ( )
}
// The range of possible sequence numbers for the shard.
type SequenceNumberRange struct {
_ struct { } ` type:"structure" `
// The ending sequence number for the range. Shards that are in the OPEN state
// have an ending sequence number of null.
EndingSequenceNumber * string ` type:"string" `
// The starting sequence number for the range.
StartingSequenceNumber * string ` type:"string" required:"true" `
}
// String returns the string representation
func ( s SequenceNumberRange ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s SequenceNumberRange ) GoString ( ) string {
return s . String ( )
}
// A uniquely identified group of data records in an Amazon Kinesis stream.
type Shard struct {
_ struct { } ` type:"structure" `
// The shard ID of the shard adjacent to the shard's parent.
AdjacentParentShardId * string ` min:"1" type:"string" `
// The range of possible hash key values for the shard, which is a set of ordered
// contiguous positive integers.
HashKeyRange * HashKeyRange ` type:"structure" required:"true" `
// The shard ID of the shard's parent.
ParentShardId * string ` min:"1" type:"string" `
// The range of possible sequence numbers for the shard.
SequenceNumberRange * SequenceNumberRange ` type:"structure" required:"true" `
// The unique identifier of the shard within the stream.
ShardId * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s Shard ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s Shard ) GoString ( ) string {
return s . String ( )
}
// Represents the input for SplitShard.
type SplitShardInput struct {
_ struct { } ` type:"structure" `
// A hash key value for the starting hash key of one of the child shards created
// by the split. The hash key range for a given shard constitutes a set of ordered
// contiguous positive integers. The value for NewStartingHashKey must be in
// the range of hash keys being mapped into the shard. The NewStartingHashKey
// hash key value and all higher hash key values in the hash key range are distributed
// to one of the child shards. All the lower hash key values in the range are
// distributed to the other child shard.
NewStartingHashKey * string ` type:"string" required:"true" `
// The shard ID of the shard to split.
ShardToSplit * string ` min:"1" type:"string" required:"true" `
// The name of the stream for the shard split.
StreamName * string ` min:"1" type:"string" required:"true" `
}
// String returns the string representation
func ( s SplitShardInput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s SplitShardInput ) GoString ( ) string {
return s . String ( )
}
// Validate inspects the fields of the type to determine if they are valid.
func ( s * SplitShardInput ) Validate ( ) error {
invalidParams := request . ErrInvalidParams { Context : "SplitShardInput" }
if s . NewStartingHashKey == nil {
invalidParams . Add ( request . NewErrParamRequired ( "NewStartingHashKey" ) )
}
if s . ShardToSplit == nil {
invalidParams . Add ( request . NewErrParamRequired ( "ShardToSplit" ) )
}
if s . ShardToSplit != nil && len ( * s . ShardToSplit ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "ShardToSplit" , 1 ) )
}
if s . StreamName == nil {
invalidParams . Add ( request . NewErrParamRequired ( "StreamName" ) )
}
if s . StreamName != nil && len ( * s . StreamName ) < 1 {
invalidParams . Add ( request . NewErrParamMinLen ( "StreamName" , 1 ) )
}
if invalidParams . Len ( ) > 0 {
return invalidParams
}
return nil
}
type SplitShardOutput struct {
_ struct { } ` type:"structure" `
}
// String returns the string representation
func ( s SplitShardOutput ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s SplitShardOutput ) GoString ( ) string {
return s . String ( )
}
// Represents the output for DescribeStream.
type StreamDescription struct {
_ struct { } ` type:"structure" `
// Represents the current enhanced monitoring settings of the stream.
EnhancedMonitoring [ ] * EnhancedMetrics ` type:"list" required:"true" `
// If set to true, more shards in the stream are available to describe.
HasMoreShards * bool ` type:"boolean" required:"true" `
// The current retention period, in hours.
RetentionPeriodHours * int64 ` min:"24" type:"integer" required:"true" `
// The shards that comprise the stream.
Shards [ ] * Shard ` type:"list" required:"true" `
// The Amazon Resource Name (ARN) for the stream being described.
StreamARN * string ` type:"string" required:"true" `
// The name of the stream being described.
StreamName * string ` min:"1" type:"string" required:"true" `
// The current status of the stream being described. The stream status is one
// of the following states:
//
//    CREATING - The stream is being created. Amazon Kinesis immediately returns
//    and sets StreamStatus to CREATING.
//
//    DELETING - The stream is being deleted. The specified stream is in the
//    DELETING state until Amazon Kinesis completes the deletion.
//
//    ACTIVE - The stream exists and is ready for read and write operations or
//    deletion. You should perform read and write operations only on an ACTIVE
//    stream.
//
//    UPDATING - Shards in the stream are being merged or split. Read and write
//    operations continue to work while the stream is in the UPDATING state.
StreamStatus * string ` type:"string" required:"true" enum:"StreamStatus" `
}
// String returns the string representation
func ( s StreamDescription ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s StreamDescription ) GoString ( ) string {
return s . String ( )
}
// Metadata assigned to the stream, consisting of a key-value pair.
type Tag struct {
_ struct { } ` type:"structure" `
// A unique identifier for the tag. Maximum length: 128 characters. Valid characters:
// Unicode letters, digits, white space, _ . / = + - % @
Key * string ` min:"1" type:"string" required:"true" `
// An optional string, typically used to describe or define the tag. Maximum
// length: 256 characters. Valid characters: Unicode letters, digits, white
// space, _ . / = + - % @
Value * string ` type:"string" `
}
// String returns the string representation
func ( s Tag ) String ( ) string {
return awsutil . Prettify ( s )
}
// GoString returns the string representation
func ( s Tag ) GoString ( ) string {
return s . String ( )
}
const (
// @enum MetricsName
MetricsNameIncomingBytes = "IncomingBytes"
// @enum MetricsName
MetricsNameIncomingRecords = "IncomingRecords"
// @enum MetricsName
MetricsNameOutgoingBytes = "OutgoingBytes"
// @enum MetricsName
MetricsNameOutgoingRecords = "OutgoingRecords"
// @enum MetricsName
MetricsNameWriteProvisionedThroughputExceeded = "WriteProvisionedThroughputExceeded"
// @enum MetricsName
MetricsNameReadProvisionedThroughputExceeded = "ReadProvisionedThroughputExceeded"
// @enum MetricsName
MetricsNameIteratorAgeMilliseconds = "IteratorAgeMilliseconds"
// @enum MetricsName
MetricsNameAll = "ALL"
)
const (
// @enum ShardIteratorType
ShardIteratorTypeAtSequenceNumber = "AT_SEQUENCE_NUMBER"
// @enum ShardIteratorType
ShardIteratorTypeAfterSequenceNumber = "AFTER_SEQUENCE_NUMBER"
// @enum ShardIteratorType
ShardIteratorTypeTrimHorizon = "TRIM_HORIZON"
// @enum ShardIteratorType
ShardIteratorTypeLatest = "LATEST"
// @enum ShardIteratorType
ShardIteratorTypeAtTimestamp = "AT_TIMESTAMP"
)
const (
// @enum StreamStatus
StreamStatusCreating = "CREATING"
// @enum StreamStatus
StreamStatusDeleting = "DELETING"
// @enum StreamStatus
StreamStatusActive = "ACTIVE"
// @enum StreamStatus
StreamStatusUpdating = "UPDATING"
)