git-off

git off handles large files in git repos
git clone https://noulin.net/git/git-off.git
Log | Files | Refs | README

kinesis.d.ts (76972B)


      1 import {Request} from '../lib/request';
      2 import {Response} from '../lib/response';
      3 import {AWSError} from '../lib/error';
      4 import {Service} from '../lib/service';
      5 import {ServiceConfigurationOptions} from '../lib/service';
      6 import {ConfigBase as Config} from '../lib/config';
      7 interface Blob {}
      8 declare class Kinesis extends Service {
      9   /**
     10    * Constructs a service object. This object has one method for each API operation.
     11    */
     12   constructor(options?: Kinesis.Types.ClientConfiguration)
     13   config: Config & Kinesis.Types.ClientConfiguration;
     14   /**
     15    * Adds or updates tags for the specified Amazon Kinesis stream. Each stream can have up to 10 tags. If tags have already been assigned to the stream, AddTagsToStream overwrites any existing tags that correspond to the specified tag keys.
     16    */
     17   addTagsToStream(params: Kinesis.Types.AddTagsToStreamInput, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
     18   /**
     19    * Adds or updates tags for the specified Amazon Kinesis stream. Each stream can have up to 10 tags. If tags have already been assigned to the stream, AddTagsToStream overwrites any existing tags that correspond to the specified tag keys.
     20    */
     21   addTagsToStream(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
     22   /**
     23    * Creates an Amazon Kinesis stream. A stream captures and transports data records that are continuously emitted from different data sources or producers. Scale-out within a stream is explicitly supported by means of shards, which are uniquely identified groups of data records in a stream. You specify and control the number of shards that a stream is composed of. Each shard can support reads up to 5 transactions per second, up to a maximum data read total of 2 MB per second. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second. You can add shards to a stream if the amount of data input increases and you can remove shards if the amount of data input decreases. The stream name identifies the stream. The name is scoped to the AWS account used by the application. It is also scoped by region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different regions, can have the same name.  CreateStream is an asynchronous operation. Upon receiving a CreateStream request, Amazon Kinesis immediately returns and sets the stream status to CREATING. After the stream is created, Amazon Kinesis sets the stream status to ACTIVE. You should perform read and write operations only on an ACTIVE stream.  You receive a LimitExceededException when making a CreateStream request if you try to do one of the following:  Have more than five streams in the CREATING state at any point in time.  Create more shards than are authorized for your account.   For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase this limit, contact AWS Support. You can use DescribeStream to check the stream status, which is returned in StreamStatus.  CreateStream has a limit of 5 transactions per second per account.
     24    */
     25   createStream(params: Kinesis.Types.CreateStreamInput, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
     26   /**
     27    * Creates an Amazon Kinesis stream. A stream captures and transports data records that are continuously emitted from different data sources or producers. Scale-out within a stream is explicitly supported by means of shards, which are uniquely identified groups of data records in a stream. You specify and control the number of shards that a stream is composed of. Each shard can support reads up to 5 transactions per second, up to a maximum data read total of 2 MB per second. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second. You can add shards to a stream if the amount of data input increases and you can remove shards if the amount of data input decreases. The stream name identifies the stream. The name is scoped to the AWS account used by the application. It is also scoped by region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different regions, can have the same name.  CreateStream is an asynchronous operation. Upon receiving a CreateStream request, Amazon Kinesis immediately returns and sets the stream status to CREATING. After the stream is created, Amazon Kinesis sets the stream status to ACTIVE. You should perform read and write operations only on an ACTIVE stream.  You receive a LimitExceededException when making a CreateStream request if you try to do one of the following:  Have more than five streams in the CREATING state at any point in time.  Create more shards than are authorized for your account.   For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase this limit, contact AWS Support. You can use DescribeStream to check the stream status, which is returned in StreamStatus.  CreateStream has a limit of 5 transactions per second per account.
     28    */
     29   createStream(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
     30   /**
     31    * Decreases the Amazon Kinesis stream's retention period, which is the length of time data records are accessible after they are added to the stream. The minimum value of a stream's retention period is 24 hours. This operation may result in lost data. For example, if the stream's retention period is 48 hours and is decreased to 24 hours, any data already in the stream that is older than 24 hours is inaccessible.
     32    */
     33   decreaseStreamRetentionPeriod(params: Kinesis.Types.DecreaseStreamRetentionPeriodInput, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
     34   /**
     35    * Decreases the Amazon Kinesis stream's retention period, which is the length of time data records are accessible after they are added to the stream. The minimum value of a stream's retention period is 24 hours. This operation may result in lost data. For example, if the stream's retention period is 48 hours and is decreased to 24 hours, any data already in the stream that is older than 24 hours is inaccessible.
     36    */
     37   decreaseStreamRetentionPeriod(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
     38   /**
     39    * Deletes an Amazon Kinesis stream and all its shards and data. You must shut down any applications that are operating on the stream before you delete the stream. If an application attempts to operate on a deleted stream, it will receive the exception ResourceNotFoundException. If the stream is in the ACTIVE state, you can delete it. After a DeleteStream request, the specified stream is in the DELETING state until Amazon Kinesis completes the deletion.  Note: Amazon Kinesis might continue to accept data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING state until the stream deletion is complete. When you delete a stream, any shards in that stream are also deleted, and any tags are dissociated from the stream. You can use the DescribeStream operation to check the state of the stream, which is returned in StreamStatus.  DeleteStream has a limit of 5 transactions per second per account.
     40    */
     41   deleteStream(params: Kinesis.Types.DeleteStreamInput, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
     42   /**
     43    * Deletes an Amazon Kinesis stream and all its shards and data. You must shut down any applications that are operating on the stream before you delete the stream. If an application attempts to operate on a deleted stream, it will receive the exception ResourceNotFoundException. If the stream is in the ACTIVE state, you can delete it. After a DeleteStream request, the specified stream is in the DELETING state until Amazon Kinesis completes the deletion.  Note: Amazon Kinesis might continue to accept data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING state until the stream deletion is complete. When you delete a stream, any shards in that stream are also deleted, and any tags are dissociated from the stream. You can use the DescribeStream operation to check the state of the stream, which is returned in StreamStatus.  DeleteStream has a limit of 5 transactions per second per account.
     44    */
     45   deleteStream(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
     46   /**
     47    * Describes the shard limits and usage for the account. If you update your account limits, the old limits might be returned for a few minutes. This operation has a limit of 1 transaction per second per account.
     48    */
     49   describeLimits(params: Kinesis.Types.DescribeLimitsInput, callback?: (err: AWSError, data: Kinesis.Types.DescribeLimitsOutput) => void): Request<Kinesis.Types.DescribeLimitsOutput, AWSError>;
     50   /**
     51    * Describes the shard limits and usage for the account. If you update your account limits, the old limits might be returned for a few minutes. This operation has a limit of 1 transaction per second per account.
     52    */
     53   describeLimits(callback?: (err: AWSError, data: Kinesis.Types.DescribeLimitsOutput) => void): Request<Kinesis.Types.DescribeLimitsOutput, AWSError>;
     54   /**
     55    * Describes the specified Amazon Kinesis stream. The information returned includes the stream name, Amazon Resource Name (ARN), creation time, enhanced metric configuration, and shard map. The shard map is an array of shard objects. For each shard object, there is the hash key and sequence number ranges that the shard spans, and the IDs of any earlier shards that played in a role in creating the shard. Every record ingested in the stream is identified by a sequence number, which is assigned when the record is put into the stream. You can limit the number of shards returned by each call. For more information, see Retrieving Shards from a Stream in the Amazon Kinesis Streams Developer Guide. There are no guarantees about the chronological order shards returned. To process shards in chronological order, use the ID of the parent shard to track the lineage to the oldest shard. This operation has a limit of 10 transactions per second per account.
     56    */
     57   describeStream(params: Kinesis.Types.DescribeStreamInput, callback?: (err: AWSError, data: Kinesis.Types.DescribeStreamOutput) => void): Request<Kinesis.Types.DescribeStreamOutput, AWSError>;
     58   /**
     59    * Describes the specified Amazon Kinesis stream. The information returned includes the stream name, Amazon Resource Name (ARN), creation time, enhanced metric configuration, and shard map. The shard map is an array of shard objects. For each shard object, there is the hash key and sequence number ranges that the shard spans, and the IDs of any earlier shards that played in a role in creating the shard. Every record ingested in the stream is identified by a sequence number, which is assigned when the record is put into the stream. You can limit the number of shards returned by each call. For more information, see Retrieving Shards from a Stream in the Amazon Kinesis Streams Developer Guide. There are no guarantees about the chronological order shards returned. To process shards in chronological order, use the ID of the parent shard to track the lineage to the oldest shard. This operation has a limit of 10 transactions per second per account.
     60    */
     61   describeStream(callback?: (err: AWSError, data: Kinesis.Types.DescribeStreamOutput) => void): Request<Kinesis.Types.DescribeStreamOutput, AWSError>;
     62   /**
     63    * Disables enhanced monitoring.
     64    */
     65   disableEnhancedMonitoring(params: Kinesis.Types.DisableEnhancedMonitoringInput, callback?: (err: AWSError, data: Kinesis.Types.EnhancedMonitoringOutput) => void): Request<Kinesis.Types.EnhancedMonitoringOutput, AWSError>;
     66   /**
     67    * Disables enhanced monitoring.
     68    */
     69   disableEnhancedMonitoring(callback?: (err: AWSError, data: Kinesis.Types.EnhancedMonitoringOutput) => void): Request<Kinesis.Types.EnhancedMonitoringOutput, AWSError>;
     70   /**
     71    * Enables enhanced Amazon Kinesis stream monitoring for shard-level metrics.
     72    */
     73   enableEnhancedMonitoring(params: Kinesis.Types.EnableEnhancedMonitoringInput, callback?: (err: AWSError, data: Kinesis.Types.EnhancedMonitoringOutput) => void): Request<Kinesis.Types.EnhancedMonitoringOutput, AWSError>;
     74   /**
     75    * Enables enhanced Amazon Kinesis stream monitoring for shard-level metrics.
     76    */
     77   enableEnhancedMonitoring(callback?: (err: AWSError, data: Kinesis.Types.EnhancedMonitoringOutput) => void): Request<Kinesis.Types.EnhancedMonitoringOutput, AWSError>;
     78   /**
     79    * Gets data records from an Amazon Kinesis stream's shard. Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. Note that it might take multiple calls to get to a portion of the shard that contains records. You can scale by provisioning multiple shards per stream while considering service limits (for more information, see Streams Limits in the Amazon Kinesis Streams Developer Guide). Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in NextShardIterator. Specify the shard iterator returned in NextShardIterator in subsequent calls to GetRecords. Note that if the shard has been closed, the shard iterator can't return more data and GetRecords returns null in NextShardIterator. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process. Each data record can be up to 1 MB in size, and each shard can read up to 2 MB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the Limit parameter to specify the maximum number of records that GetRecords can return. Consider your average record size when determining this limit. The size of the data returned by GetRecords varies depending on the utilization of the shard. The maximum size of data that GetRecords can return is 10 MB. If a call returns this amount of data, subsequent calls made within the next 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient provisioned throughput on the shard, subsequent calls made within the next 1 second throw ProvisionedThroughputExceededException. Note that GetRecords won't return any data when it throws an exception. For this reason, we recommend that you wait one second between calls to GetRecords; however, it's possible that the application will get exceptions for longer than 1 second. To detect whether the application is falling behind in processing, you can use the MillisBehindLatest response attribute. You can also monitor the stream using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon Kinesis Streams Developer Guide). Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, that is set when a stream successfully receives and stores a record. This is commonly referred to as a server-side timestamp, whereas a client-side timestamp is set when a data producer creates or sends the record to a stream (a data producer is any data source putting data records into a stream, for example with PutRecords). The timestamp has millisecond precision. There are no guarantees about the timestamp accuracy, or that the timestamp is always increasing. For example, records in a shard or across a stream might have timestamps that are out of order.
     80    */
     81   getRecords(params: Kinesis.Types.GetRecordsInput, callback?: (err: AWSError, data: Kinesis.Types.GetRecordsOutput) => void): Request<Kinesis.Types.GetRecordsOutput, AWSError>;
     82   /**
     83    * Gets data records from an Amazon Kinesis stream's shard. Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. Note that it might take multiple calls to get to a portion of the shard that contains records. You can scale by provisioning multiple shards per stream while considering service limits (for more information, see Streams Limits in the Amazon Kinesis Streams Developer Guide). Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in NextShardIterator. Specify the shard iterator returned in NextShardIterator in subsequent calls to GetRecords. Note that if the shard has been closed, the shard iterator can't return more data and GetRecords returns null in NextShardIterator. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process. Each data record can be up to 1 MB in size, and each shard can read up to 2 MB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the Limit parameter to specify the maximum number of records that GetRecords can return. Consider your average record size when determining this limit. The size of the data returned by GetRecords varies depending on the utilization of the shard. The maximum size of data that GetRecords can return is 10 MB. If a call returns this amount of data, subsequent calls made within the next 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient provisioned throughput on the shard, subsequent calls made within the next 1 second throw ProvisionedThroughputExceededException. Note that GetRecords won't return any data when it throws an exception. For this reason, we recommend that you wait one second between calls to GetRecords; however, it's possible that the application will get exceptions for longer than 1 second. To detect whether the application is falling behind in processing, you can use the MillisBehindLatest response attribute. You can also monitor the stream using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon Kinesis Streams Developer Guide). Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, that is set when a stream successfully receives and stores a record. This is commonly referred to as a server-side timestamp, whereas a client-side timestamp is set when a data producer creates or sends the record to a stream (a data producer is any data source putting data records into a stream, for example with PutRecords). The timestamp has millisecond precision. There are no guarantees about the timestamp accuracy, or that the timestamp is always increasing. For example, records in a shard or across a stream might have timestamps that are out of order.
     84    */
     85   getRecords(callback?: (err: AWSError, data: Kinesis.Types.GetRecordsOutput) => void): Request<Kinesis.Types.GetRecordsOutput, AWSError>;
     86   /**
     87    * Gets an Amazon Kinesis shard iterator. A shard iterator expires five minutes after it is returned to the requester. A shard iterator specifies the shard position from which to start reading data records sequentially. The position is specified using the sequence number of a data record in a shard. A sequence number is the identifier associated with every record ingested in the stream, and is assigned when a record is put into the stream. Each stream has one or more shards. You must specify the shard iterator type. For example, you can set the ShardIteratorType parameter to read exactly from the position denoted by a specific sequence number by using the AT_SEQUENCE_NUMBER shard iterator type, or right after the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type, using sequence numbers returned by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. In the request, you can specify the shard iterator type AT_TIMESTAMP to read records from an arbitrary point in time, TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record in the shard in the system (the oldest data record in the shard), or LATEST so that you always read the most recent data in the shard.  When you read repeatedly from a stream, use a GetShardIterator request to get the first shard iterator for use in your first GetRecords request and for subsequent reads use the shard iterator returned by the GetRecords request in NextShardIterator. A new shard iterator is returned by every GetRecords request in NextShardIterator, which you use in the ShardIterator parameter of the next GetRecords request.  If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException. For more information about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Streams Developer Guide. If the shard is closed, GetShardIterator returns a valid iterator for the last sequence number of the shard. Note that a shard can be closed as a result of using SplitShard or MergeShards.  GetShardIterator has a limit of 5 transactions per second per account per open shard.
     88    */
     89   getShardIterator(params: Kinesis.Types.GetShardIteratorInput, callback?: (err: AWSError, data: Kinesis.Types.GetShardIteratorOutput) => void): Request<Kinesis.Types.GetShardIteratorOutput, AWSError>;
     90   /**
     91    * Gets an Amazon Kinesis shard iterator. A shard iterator expires five minutes after it is returned to the requester. A shard iterator specifies the shard position from which to start reading data records sequentially. The position is specified using the sequence number of a data record in a shard. A sequence number is the identifier associated with every record ingested in the stream, and is assigned when a record is put into the stream. Each stream has one or more shards. You must specify the shard iterator type. For example, you can set the ShardIteratorType parameter to read exactly from the position denoted by a specific sequence number by using the AT_SEQUENCE_NUMBER shard iterator type, or right after the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type, using sequence numbers returned by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. In the request, you can specify the shard iterator type AT_TIMESTAMP to read records from an arbitrary point in time, TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record in the shard in the system (the oldest data record in the shard), or LATEST so that you always read the most recent data in the shard.  When you read repeatedly from a stream, use a GetShardIterator request to get the first shard iterator for use in your first GetRecords request and for subsequent reads use the shard iterator returned by the GetRecords request in NextShardIterator. A new shard iterator is returned by every GetRecords request in NextShardIterator, which you use in the ShardIterator parameter of the next GetRecords request.  If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException. For more information about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Streams Developer Guide. If the shard is closed, GetShardIterator returns a valid iterator for the last sequence number of the shard. Note that a shard can be closed as a result of using SplitShard or MergeShards.  GetShardIterator has a limit of 5 transactions per second per account per open shard.
     92    */
     93   getShardIterator(callback?: (err: AWSError, data: Kinesis.Types.GetShardIteratorOutput) => void): Request<Kinesis.Types.GetShardIteratorOutput, AWSError>;
     94   /**
     95    * Increases the Amazon Kinesis stream's retention period, which is the length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 168 hours (7 days). Upon choosing a longer stream retention period, this operation will increase the time period records are accessible that have not yet expired. However, it will not make previous data that has expired (older than the stream's previous retention period) accessible after the operation has been called. For example, if a stream's retention period is set to 24 hours and is increased to 168 hours, any data that is older than 24 hours will remain inaccessible to consumer applications.
     96    */
     97   increaseStreamRetentionPeriod(params: Kinesis.Types.IncreaseStreamRetentionPeriodInput, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
     98   /**
     99    * Increases the Amazon Kinesis stream's retention period, which is the length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 168 hours (7 days). Upon choosing a longer stream retention period, this operation will increase the time period records are accessible that have not yet expired. However, it will not make previous data that has expired (older than the stream's previous retention period) accessible after the operation has been called. For example, if a stream's retention period is set to 24 hours and is increased to 168 hours, any data that is older than 24 hours will remain inaccessible to consumer applications.
    100    */
    101   increaseStreamRetentionPeriod(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
    102   /**
    103    * Lists your Amazon Kinesis streams. The number of streams may be too large to return from a single call to ListStreams. You can limit the number of returned streams using the Limit parameter. If you do not specify a value for the Limit parameter, Amazon Kinesis uses the default limit, which is currently 10. You can detect if there are more streams available to list by using the HasMoreStreams flag from the returned output. If there are more streams available, you can request more streams by using the name of the last stream returned by the ListStreams request in the ExclusiveStartStreamName parameter in a subsequent request to ListStreams. The group of stream names returned by the subsequent request is then added to the list. You can continue this process until all the stream names have been collected in the list.   ListStreams has a limit of 5 transactions per second per account.
    104    */
    105   listStreams(params: Kinesis.Types.ListStreamsInput, callback?: (err: AWSError, data: Kinesis.Types.ListStreamsOutput) => void): Request<Kinesis.Types.ListStreamsOutput, AWSError>;
    106   /**
    107    * Lists your Amazon Kinesis streams. The number of streams may be too large to return from a single call to ListStreams. You can limit the number of returned streams using the Limit parameter. If you do not specify a value for the Limit parameter, Amazon Kinesis uses the default limit, which is currently 10. You can detect if there are more streams available to list by using the HasMoreStreams flag from the returned output. If there are more streams available, you can request more streams by using the name of the last stream returned by the ListStreams request in the ExclusiveStartStreamName parameter in a subsequent request to ListStreams. The group of stream names returned by the subsequent request is then added to the list. You can continue this process until all the stream names have been collected in the list.   ListStreams has a limit of 5 transactions per second per account.
    108    */
    109   listStreams(callback?: (err: AWSError, data: Kinesis.Types.ListStreamsOutput) => void): Request<Kinesis.Types.ListStreamsOutput, AWSError>;
    110   /**
    111    * Lists the tags for the specified Amazon Kinesis stream.
    112    */
    113   listTagsForStream(params: Kinesis.Types.ListTagsForStreamInput, callback?: (err: AWSError, data: Kinesis.Types.ListTagsForStreamOutput) => void): Request<Kinesis.Types.ListTagsForStreamOutput, AWSError>;
    114   /**
    115    * Lists the tags for the specified Amazon Kinesis stream.
    116    */
    117   listTagsForStream(callback?: (err: AWSError, data: Kinesis.Types.ListTagsForStreamOutput) => void): Request<Kinesis.Types.ListTagsForStreamOutput, AWSError>;
    118   /**
    119    * Merges two adjacent shards in an Amazon Kinesis stream and combines them into a single shard to reduce the stream's capacity to ingest and transport data. Two shards are considered adjacent if the union of the hash key ranges for the two shards form a contiguous set with no gaps. For example, if you have two shards, one with a hash key range of 276...381 and the other with a hash key range of 382...454, then you could merge these two shards into a single shard that would have a hash key range of 276...454. After the merge, the single child shard receives data for all hash key values covered by the two parent shards.  MergeShards is called when there is a need to reduce the overall capacity of a stream because of excess capacity that is not being used. You must specify the shard to be merged and the adjacent shard for a stream. For more information about merging shards, see Merge Two Shards in the Amazon Kinesis Streams Developer Guide. If the stream is in the ACTIVE state, you can call MergeShards. If a stream is in the CREATING, UPDATING, or DELETING state, MergeShards returns a ResourceInUseException. If the specified stream does not exist, MergeShards returns a ResourceNotFoundException.  You can use DescribeStream to check the state of the stream, which is returned in StreamStatus.  MergeShards is an asynchronous operation. Upon receiving a MergeShards request, Amazon Kinesis immediately returns a response and sets the StreamStatus to UPDATING. After the operation is completed, Amazon Kinesis sets the StreamStatus to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.  You use DescribeStream to determine the shard IDs that are specified in the MergeShards request.  If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards or SplitShard, you will receive a LimitExceededException.   MergeShards has limit of 5 transactions per second per account.
    120    */
    121   mergeShards(params: Kinesis.Types.MergeShardsInput, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
    122   /**
    123    * Merges two adjacent shards in an Amazon Kinesis stream and combines them into a single shard to reduce the stream's capacity to ingest and transport data. Two shards are considered adjacent if the union of the hash key ranges for the two shards form a contiguous set with no gaps. For example, if you have two shards, one with a hash key range of 276...381 and the other with a hash key range of 382...454, then you could merge these two shards into a single shard that would have a hash key range of 276...454. After the merge, the single child shard receives data for all hash key values covered by the two parent shards.  MergeShards is called when there is a need to reduce the overall capacity of a stream because of excess capacity that is not being used. You must specify the shard to be merged and the adjacent shard for a stream. For more information about merging shards, see Merge Two Shards in the Amazon Kinesis Streams Developer Guide. If the stream is in the ACTIVE state, you can call MergeShards. If a stream is in the CREATING, UPDATING, or DELETING state, MergeShards returns a ResourceInUseException. If the specified stream does not exist, MergeShards returns a ResourceNotFoundException.  You can use DescribeStream to check the state of the stream, which is returned in StreamStatus.  MergeShards is an asynchronous operation. Upon receiving a MergeShards request, Amazon Kinesis immediately returns a response and sets the StreamStatus to UPDATING. After the operation is completed, Amazon Kinesis sets the StreamStatus to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.  You use DescribeStream to determine the shard IDs that are specified in the MergeShards request.  If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards or SplitShard, you will receive a LimitExceededException.   MergeShards has limit of 5 transactions per second per account.
    124    */
    125   mergeShards(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
    126   /**
    127    * Writes a single data record into an Amazon Kinesis stream. Call PutRecord to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second. You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself. The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on. The partition key is used by Amazon Kinesis to distribute data across shards. Amazon Kinesis segregates the data records that belong to a stream into multiple shards, using the partition key associated with each data record to determine which shard a given data record belongs to. Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the ExplicitHashKey parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.  PutRecord returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record. Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. To guarantee strictly increasing ordering, write serially to a shard and use the SequenceNumberForOrdering parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide. If a PutRecord request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException.  Data records are accessible for only 24 hours from the time that they are added to a stream.
    128    */
    129   putRecord(params: Kinesis.Types.PutRecordInput, callback?: (err: AWSError, data: Kinesis.Types.PutRecordOutput) => void): Request<Kinesis.Types.PutRecordOutput, AWSError>;
    130   /**
    131    * Writes a single data record into an Amazon Kinesis stream. Call PutRecord to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second. You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself. The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on. The partition key is used by Amazon Kinesis to distribute data across shards. Amazon Kinesis segregates the data records that belong to a stream into multiple shards, using the partition key associated with each data record to determine which shard a given data record belongs to. Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the ExplicitHashKey parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.  PutRecord returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record. Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. To guarantee strictly increasing ordering, write serially to a shard and use the SequenceNumberForOrdering parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide. If a PutRecord request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException.  Data records are accessible for only 24 hours from the time that they are added to a stream.
    132    */
    133   putRecord(callback?: (err: AWSError, data: Kinesis.Types.PutRecordOutput) => void): Request<Kinesis.Types.PutRecordOutput, AWSError>;
    134   /**
    135    * Writes multiple data records into an Amazon Kinesis stream in a single call (also referred to as a PutRecords request). Use this operation to send data into the stream for data ingestion and processing.  Each PutRecords request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second. You must specify the name of the stream that captures, stores, and transports the data; and an array of request Records, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob. The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on. The partition key is used by Amazon Kinesis as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide. Each record in the Records array may include an optional parameter, ExplicitHashKey, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide. The PutRecords response includes an array of response Records. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response Records array always includes the same number of records as the request array. The response Records array includes both successfully and unsuccessfully processed records. Amazon Kinesis attempts to process all records in each PutRecords request. A single record failure does not stop the processing of subsequent records. A successfully-processed record includes ShardId and SequenceNumber values. The ShardId parameter identifies the shard in the stream where the record is stored. The SequenceNumber parameter is an identifier assigned to the put record, unique to all records in the stream. An unsuccessfully-processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides more detailed information about the ProvisionedThroughputExceededException exception including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide. By default, data records are accessible for only 24 hours from the time that they are added to an Amazon Kinesis stream. This retention period can be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod operations.
    136    */
    137   putRecords(params: Kinesis.Types.PutRecordsInput, callback?: (err: AWSError, data: Kinesis.Types.PutRecordsOutput) => void): Request<Kinesis.Types.PutRecordsOutput, AWSError>;
    138   /**
    139    * Writes multiple data records into an Amazon Kinesis stream in a single call (also referred to as a PutRecords request). Use this operation to send data into the stream for data ingestion and processing.  Each PutRecords request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second. You must specify the name of the stream that captures, stores, and transports the data; and an array of request Records, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob. The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on. The partition key is used by Amazon Kinesis as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide. Each record in the Records array may include an optional parameter, ExplicitHashKey, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide. The PutRecords response includes an array of response Records. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response Records array always includes the same number of records as the request array. The response Records array includes both successfully and unsuccessfully processed records. Amazon Kinesis attempts to process all records in each PutRecords request. A single record failure does not stop the processing of subsequent records. A successfully-processed record includes ShardId and SequenceNumber values. The ShardId parameter identifies the shard in the stream where the record is stored. The SequenceNumber parameter is an identifier assigned to the put record, unique to all records in the stream. An unsuccessfully-processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides more detailed information about the ProvisionedThroughputExceededException exception including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide. By default, data records are accessible for only 24 hours from the time that they are added to an Amazon Kinesis stream. This retention period can be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod operations.
    140    */
    141   putRecords(callback?: (err: AWSError, data: Kinesis.Types.PutRecordsOutput) => void): Request<Kinesis.Types.PutRecordsOutput, AWSError>;
    142   /**
    143    * Removes tags from the specified Amazon Kinesis stream. Removed tags are deleted and cannot be recovered after this operation successfully completes. If you specify a tag that does not exist, it is ignored.
    144    */
    145   removeTagsFromStream(params: Kinesis.Types.RemoveTagsFromStreamInput, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
    146   /**
    147    * Removes tags from the specified Amazon Kinesis stream. Removed tags are deleted and cannot be recovered after this operation successfully completes. If you specify a tag that does not exist, it is ignored.
    148    */
    149   removeTagsFromStream(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
    150   /**
    151    * Splits a shard into two new shards in the Amazon Kinesis stream to increase the stream's capacity to ingest and transport data. SplitShard is called when there is a need to increase the overall capacity of a stream because of an expected increase in the volume of data records being ingested.  You can also use SplitShard when a shard appears to be approaching its maximum utilization; for example, the producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call SplitShard to increase stream capacity, so that more Amazon Kinesis applications can simultaneously read data from the stream for real-time processing.  You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might simply be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. For more information about splitting shards, see Split a Shard in the Amazon Kinesis Streams Developer Guide. You can use DescribeStream to determine the shard ID and hash key values for the ShardToSplit and NewStartingHashKey parameters that are specified in the SplitShard request.  SplitShard is an asynchronous operation. Upon receiving a SplitShard request, Amazon Kinesis immediately returns a response and sets the stream status to UPDATING. After the operation is completed, Amazon Kinesis sets the stream status to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.  You can use DescribeStream to check the status of the stream, which is returned in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard. If a stream is in CREATING or UPDATING or DELETING states, DescribeStream returns a ResourceInUseException. If the specified stream does not exist, DescribeStream returns a ResourceNotFoundException. If you try to create more shards than are authorized for your account, you receive a LimitExceededException.  For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase this limit, contact AWS Support. If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a LimitExceededException.   SplitShard has limit of 5 transactions per second per account.
    152    */
    153   splitShard(params: Kinesis.Types.SplitShardInput, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
    154   /**
    155    * Splits a shard into two new shards in the Amazon Kinesis stream to increase the stream's capacity to ingest and transport data. SplitShard is called when there is a need to increase the overall capacity of a stream because of an expected increase in the volume of data records being ingested.  You can also use SplitShard when a shard appears to be approaching its maximum utilization; for example, the producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call SplitShard to increase stream capacity, so that more Amazon Kinesis applications can simultaneously read data from the stream for real-time processing.  You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might simply be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. For more information about splitting shards, see Split a Shard in the Amazon Kinesis Streams Developer Guide. You can use DescribeStream to determine the shard ID and hash key values for the ShardToSplit and NewStartingHashKey parameters that are specified in the SplitShard request.  SplitShard is an asynchronous operation. Upon receiving a SplitShard request, Amazon Kinesis immediately returns a response and sets the stream status to UPDATING. After the operation is completed, Amazon Kinesis sets the stream status to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.  You can use DescribeStream to check the status of the stream, which is returned in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard. If a stream is in CREATING or UPDATING or DELETING states, DescribeStream returns a ResourceInUseException. If the specified stream does not exist, DescribeStream returns a ResourceNotFoundException. If you try to create more shards than are authorized for your account, you receive a LimitExceededException.  For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase this limit, contact AWS Support. If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a LimitExceededException.   SplitShard has limit of 5 transactions per second per account.
    156    */
    157   splitShard(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
    158   /**
    159    * Updates the shard count of the specified stream to the specified number of shards. Updating the shard count is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to UPDATING. After the update is complete, Amazon Kinesis sets the status of the stream back to ACTIVE. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is UPDATING. To update the shard count, Amazon Kinesis performs splits and merges and individual shards. This can cause short-lived shards to be created, in addition to the final shards. We recommend that you double or halve the shard count, as this results in the fewest number of splits or merges. This operation has a rate limit of twice per rolling 24 hour period. You cannot scale above double your current shard count, scale below half your current shard count, or exceed the shard limits for your account. For the default limits for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase a limit, contact AWS Support.
    160    */
    161   updateShardCount(params: Kinesis.Types.UpdateShardCountInput, callback?: (err: AWSError, data: Kinesis.Types.UpdateShardCountOutput) => void): Request<Kinesis.Types.UpdateShardCountOutput, AWSError>;
    162   /**
    163    * Updates the shard count of the specified stream to the specified number of shards. Updating the shard count is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to UPDATING. After the update is complete, Amazon Kinesis sets the status of the stream back to ACTIVE. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is UPDATING. To update the shard count, Amazon Kinesis performs splits and merges and individual shards. This can cause short-lived shards to be created, in addition to the final shards. We recommend that you double or halve the shard count, as this results in the fewest number of splits or merges. This operation has a rate limit of twice per rolling 24 hour period. You cannot scale above double your current shard count, scale below half your current shard count, or exceed the shard limits for your account. For the default limits for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase a limit, contact AWS Support.
    164    */
    165   updateShardCount(callback?: (err: AWSError, data: Kinesis.Types.UpdateShardCountOutput) => void): Request<Kinesis.Types.UpdateShardCountOutput, AWSError>;
    166   /**
    167    * Waits for the streamExists state by periodically calling the underlying Kinesis.describeStreamoperation every 10 seconds (at most 18 times).
    168    */
    169   waitFor(state: "streamExists", params: Kinesis.Types.DescribeStreamInput, callback?: (err: AWSError, data: Kinesis.Types.DescribeStreamOutput) => void): Request<Kinesis.Types.DescribeStreamOutput, AWSError>;
    170   /**
    171    * Waits for the streamExists state by periodically calling the underlying Kinesis.describeStreamoperation every 10 seconds (at most 18 times).
    172    */
    173   waitFor(state: "streamExists", callback?: (err: AWSError, data: Kinesis.Types.DescribeStreamOutput) => void): Request<Kinesis.Types.DescribeStreamOutput, AWSError>;
    174 }
    175 declare namespace Kinesis.Types {
    176   export interface AddTagsToStreamInput {
    177     /**
    178      * The name of the stream.
    179      */
    180     StreamName: StreamName;
    181     /**
    182      * The set of key-value pairs to use to create the tags.
    183      */
    184     Tags: TagMap;
    185   }
    186   export type BooleanObject = boolean;
    187   export interface CreateStreamInput {
    188     /**
    189      * A name to identify the stream. The stream name is scoped to the AWS account used by the application that creates the stream. It is also scoped by region. That is, two streams in two different AWS accounts can have the same name, and two streams in the same AWS account but in two different regions can have the same name.
    190      */
    191     StreamName: StreamName;
    192     /**
    193      * The number of shards that the stream will use. The throughput of the stream is a function of the number of shards; more shards are required for greater provisioned throughput. DefaultShardLimit;
    194      */
    195     ShardCount: PositiveIntegerObject;
    196   }
    197   export type Data = Buffer|Uint8Array|Blob|string;
    198   export interface DecreaseStreamRetentionPeriodInput {
    199     /**
    200      * The name of the stream to modify.
    201      */
    202     StreamName: StreamName;
    203     /**
    204      * The new retention period of the stream, in hours. Must be less than the current retention period.
    205      */
    206     RetentionPeriodHours: PositiveIntegerObject;
    207   }
    208   export interface DeleteStreamInput {
    209     /**
    210      * The name of the stream to delete.
    211      */
    212     StreamName: StreamName;
    213   }
    214   export interface DescribeLimitsInput {
    215   }
    216   export interface DescribeLimitsOutput {
    217     /**
    218      * The maximum number of shards.
    219      */
    220     ShardLimit: ShardCountObject;
    221     /**
    222      * The number of open shards.
    223      */
    224     OpenShardCount: ShardCountObject;
    225   }
    226   export interface DescribeStreamInput {
    227     /**
    228      * The name of the stream to describe.
    229      */
    230     StreamName: StreamName;
    231     /**
    232      * The maximum number of shards to return in a single call. The default value is 100. If you specify a value greater than 100, at most 100 shards are returned.
    233      */
    234     Limit?: DescribeStreamInputLimit;
    235     /**
    236      * The shard ID of the shard to start with.
    237      */
    238     ExclusiveStartShardId?: ShardId;
    239   }
    240   export type DescribeStreamInputLimit = number;
    241   export interface DescribeStreamOutput {
    242     /**
    243      * The current status of the stream, the stream ARN, an array of shard objects that comprise the stream, and whether there are more shards available.
    244      */
    245     StreamDescription: StreamDescription;
    246   }
    247   export interface DisableEnhancedMonitoringInput {
    248     /**
    249      * The name of the Amazon Kinesis stream for which to disable enhanced monitoring.
    250      */
    251     StreamName: StreamName;
    252     /**
    253      * List of shard-level metrics to disable. The following are the valid shard-level metrics. The value "ALL" disables every metric.    IncomingBytes     IncomingRecords     OutgoingBytes     OutgoingRecords     WriteProvisionedThroughputExceeded     ReadProvisionedThroughputExceeded     IteratorAgeMilliseconds     ALL    For more information, see Monitoring the Amazon Kinesis Streams Service with Amazon CloudWatch in the Amazon Kinesis Streams Developer Guide.
    254      */
    255     ShardLevelMetrics: MetricsNameList;
    256   }
    257   export interface EnableEnhancedMonitoringInput {
    258     /**
    259      * The name of the stream for which to enable enhanced monitoring.
    260      */
    261     StreamName: StreamName;
    262     /**
    263      * List of shard-level metrics to enable. The following are the valid shard-level metrics. The value "ALL" enables every metric.    IncomingBytes     IncomingRecords     OutgoingBytes     OutgoingRecords     WriteProvisionedThroughputExceeded     ReadProvisionedThroughputExceeded     IteratorAgeMilliseconds     ALL    For more information, see Monitoring the Amazon Kinesis Streams Service with Amazon CloudWatch in the Amazon Kinesis Streams Developer Guide.
    264      */
    265     ShardLevelMetrics: MetricsNameList;
    266   }
    267   export interface EnhancedMetrics {
    268     /**
    269      * List of shard-level metrics. The following are the valid shard-level metrics. The value "ALL" enhances every metric.    IncomingBytes     IncomingRecords     OutgoingBytes     OutgoingRecords     WriteProvisionedThroughputExceeded     ReadProvisionedThroughputExceeded     IteratorAgeMilliseconds     ALL    For more information, see Monitoring the Amazon Kinesis Streams Service with Amazon CloudWatch in the Amazon Kinesis Streams Developer Guide.
    270      */
    271     ShardLevelMetrics?: MetricsNameList;
    272   }
    273   export type EnhancedMonitoringList = EnhancedMetrics[];
    274   export interface EnhancedMonitoringOutput {
    275     /**
    276      * The name of the Amazon Kinesis stream.
    277      */
    278     StreamName?: StreamName;
    279     /**
    280      * Represents the current state of the metrics that are in the enhanced state before the operation.
    281      */
    282     CurrentShardLevelMetrics?: MetricsNameList;
    283     /**
    284      * Represents the list of all the metrics that would be in the enhanced state after the operation.
    285      */
    286     DesiredShardLevelMetrics?: MetricsNameList;
    287   }
    288   export type ErrorCode = string;
    289   export type ErrorMessage = string;
    290   export interface GetRecordsInput {
    291     /**
    292      * The position in the shard from which you want to start sequentially reading data records. A shard iterator specifies this position using the sequence number of a data record in the shard.
    293      */
    294     ShardIterator: ShardIterator;
    295     /**
    296      * The maximum number of records to return. Specify a value of up to 10,000. If you specify a value that is greater than 10,000, GetRecords throws InvalidArgumentException.
    297      */
    298     Limit?: GetRecordsInputLimit;
    299   }
    300   export type GetRecordsInputLimit = number;
    301   export interface GetRecordsOutput {
    302     /**
    303      * The data records retrieved from the shard.
    304      */
    305     Records: RecordList;
    306     /**
    307      * The next position in the shard from which to start sequentially reading data records. If set to null, the shard has been closed and the requested iterator will not return any more data. 
    308      */
    309     NextShardIterator?: ShardIterator;
    310     /**
    311      * The number of milliseconds the GetRecords response is from the tip of the stream, indicating how far behind current time the consumer is. A value of zero indicates record processing is caught up, and there are no new records to process at this moment.
    312      */
    313     MillisBehindLatest?: MillisBehindLatest;
    314   }
    315   export interface GetShardIteratorInput {
    316     /**
    317      * The name of the Amazon Kinesis stream.
    318      */
    319     StreamName: StreamName;
    320     /**
    321      * The shard ID of the Amazon Kinesis shard to get the iterator for.
    322      */
    323     ShardId: ShardId;
    324     /**
    325      * Determines how the shard iterator is used to start reading data records from the shard. The following are the valid Amazon Kinesis shard iterator types:  AT_SEQUENCE_NUMBER - Start reading from the position denoted by a specific sequence number, provided in the value StartingSequenceNumber.  AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a specific sequence number, provided in the value StartingSequenceNumber.  AT_TIMESTAMP - Start reading from the position denoted by a specific timestamp, provided in the value Timestamp.  TRIM_HORIZON - Start reading at the last untrimmed record in the shard in the system, which is the oldest data record in the shard.  LATEST - Start reading just after the most recent record in the shard, so that you always read the most recent data in the shard.  
    326      */
    327     ShardIteratorType: ShardIteratorType;
    328     /**
    329      * The sequence number of the data record in the shard from which to start reading. Used with shard iterator type AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER.
    330      */
    331     StartingSequenceNumber?: SequenceNumber;
    332     /**
    333      * The timestamp of the data record from which to start reading. Used with shard iterator type AT_TIMESTAMP. A timestamp is the Unix epoch date with precision in milliseconds. For example, 2016-04-04T19:58:46.480-00:00 or 1459799926.480. If a record with this exact timestamp does not exist, the iterator returned is for the next (later) record. If the timestamp is older than the current trim horizon, the iterator returned is for the oldest untrimmed data record (TRIM_HORIZON).
    334      */
    335     Timestamp?: Timestamp;
    336   }
    337   export interface GetShardIteratorOutput {
    338     /**
    339      * The position in the shard from which to start reading data records sequentially. A shard iterator specifies this position using the sequence number of a data record in a shard.
    340      */
    341     ShardIterator?: ShardIterator;
    342   }
    343   export type HashKey = string;
    344   export interface HashKeyRange {
    345     /**
    346      * The starting hash key of the hash key range.
    347      */
    348     StartingHashKey: HashKey;
    349     /**
    350      * The ending hash key of the hash key range.
    351      */
    352     EndingHashKey: HashKey;
    353   }
    354   export interface IncreaseStreamRetentionPeriodInput {
    355     /**
    356      * The name of the stream to modify.
    357      */
    358     StreamName: StreamName;
    359     /**
    360      * The new retention period of the stream, in hours. Must be more than the current retention period.
    361      */
    362     RetentionPeriodHours: PositiveIntegerObject;
    363   }
    364   export interface ListStreamsInput {
    365     /**
    366      * The maximum number of streams to list.
    367      */
    368     Limit?: ListStreamsInputLimit;
    369     /**
    370      * The name of the stream to start the list with.
    371      */
    372     ExclusiveStartStreamName?: StreamName;
    373   }
    374   export type ListStreamsInputLimit = number;
    375   export interface ListStreamsOutput {
    376     /**
    377      * The names of the streams that are associated with the AWS account making the ListStreams request.
    378      */
    379     StreamNames: StreamNameList;
    380     /**
    381      * If set to true, there are more streams available to list.
    382      */
    383     HasMoreStreams: BooleanObject;
    384   }
    385   export interface ListTagsForStreamInput {
    386     /**
    387      * The name of the stream.
    388      */
    389     StreamName: StreamName;
    390     /**
    391      * The key to use as the starting point for the list of tags. If this parameter is set, ListTagsForStream gets all tags that occur after ExclusiveStartTagKey. 
    392      */
    393     ExclusiveStartTagKey?: TagKey;
    394     /**
    395      * The number of tags to return. If this number is less than the total number of tags associated with the stream, HasMoreTags is set to true. To list additional tags, set ExclusiveStartTagKey to the last key in the response.
    396      */
    397     Limit?: ListTagsForStreamInputLimit;
    398   }
    399   export type ListTagsForStreamInputLimit = number;
    400   export interface ListTagsForStreamOutput {
    401     /**
    402      * A list of tags associated with StreamName, starting with the first tag after ExclusiveStartTagKey and up to the specified Limit. 
    403      */
    404     Tags: TagList;
    405     /**
    406      * If set to true, more tags are available. To request additional tags, set ExclusiveStartTagKey to the key of the last tag returned.
    407      */
    408     HasMoreTags: BooleanObject;
    409   }
    410   export interface MergeShardsInput {
    411     /**
    412      * The name of the stream for the merge.
    413      */
    414     StreamName: StreamName;
    415     /**
    416      * The shard ID of the shard to combine with the adjacent shard for the merge.
    417      */
    418     ShardToMerge: ShardId;
    419     /**
    420      * The shard ID of the adjacent shard for the merge.
    421      */
    422     AdjacentShardToMerge: ShardId;
    423   }
    424   export type MetricsName = "IncomingBytes"|"IncomingRecords"|"OutgoingBytes"|"OutgoingRecords"|"WriteProvisionedThroughputExceeded"|"ReadProvisionedThroughputExceeded"|"IteratorAgeMilliseconds"|"ALL"|string;
    425   export type MetricsNameList = MetricsName[];
    426   export type MillisBehindLatest = number;
    427   export type PartitionKey = string;
    428   export type PositiveIntegerObject = number;
    429   export interface PutRecordInput {
    430     /**
    431      * The name of the stream to put the data record into.
    432      */
    433     StreamName: StreamName;
    434     /**
    435      * The data blob to put into the record, which is base64-encoded when the blob is serialized. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB).
    436      */
    437     Data: Data;
    438     /**
    439      * Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream.
    440      */
    441     PartitionKey: PartitionKey;
    442     /**
    443      * The hash value used to explicitly determine the shard the data record is assigned to by overriding the partition key hash.
    444      */
    445     ExplicitHashKey?: HashKey;
    446     /**
    447      * Guarantees strictly increasing sequence numbers, for puts from the same client and to the same partition key. Usage: set the SequenceNumberForOrdering of record n to the sequence number of record n-1 (as returned in the result when putting record n-1). If this parameter is not set, records will be coarsely ordered based on arrival time.
    448      */
    449     SequenceNumberForOrdering?: SequenceNumber;
    450   }
    451   export interface PutRecordOutput {
    452     /**
    453      * The shard ID of the shard where the data record was placed.
    454      */
    455     ShardId: ShardId;
    456     /**
    457      * The sequence number identifier that was assigned to the put data record. The sequence number for the record is unique across all records in the stream. A sequence number is the identifier associated with every record put into the stream.
    458      */
    459     SequenceNumber: SequenceNumber;
    460   }
    461   export interface PutRecordsInput {
    462     /**
    463      * The records associated with the request.
    464      */
    465     Records: PutRecordsRequestEntryList;
    466     /**
    467      * The stream name associated with the request.
    468      */
    469     StreamName: StreamName;
    470   }
    471   export interface PutRecordsOutput {
    472     /**
    473      * The number of unsuccessfully processed records in a PutRecords request.
    474      */
    475     FailedRecordCount?: PositiveIntegerObject;
    476     /**
    477      * An array of successfully and unsuccessfully processed record results, correlated with the request by natural ordering. A record that is successfully added to a stream includes SequenceNumber and ShardId in the result. A record that fails to be added to a stream includes ErrorCode and ErrorMessage in the result.
    478      */
    479     Records: PutRecordsResultEntryList;
    480   }
    481   export interface PutRecordsRequestEntry {
    482     /**
    483      * The data blob to put into the record, which is base64-encoded when the blob is serialized. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB).
    484      */
    485     Data: Data;
    486     /**
    487      * The hash value used to determine explicitly the shard that the data record is assigned to by overriding the partition key hash.
    488      */
    489     ExplicitHashKey?: HashKey;
    490     /**
    491      * Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream.
    492      */
    493     PartitionKey: PartitionKey;
    494   }
    495   export type PutRecordsRequestEntryList = PutRecordsRequestEntry[];
    496   export interface PutRecordsResultEntry {
    497     /**
    498      * The sequence number for an individual record result.
    499      */
    500     SequenceNumber?: SequenceNumber;
    501     /**
    502      * The shard ID for an individual record result.
    503      */
    504     ShardId?: ShardId;
    505     /**
    506      * The error code for an individual record result. ErrorCodes can be either ProvisionedThroughputExceededException or InternalFailure.
    507      */
    508     ErrorCode?: ErrorCode;
    509     /**
    510      * The error message for an individual record result. An ErrorCode value of ProvisionedThroughputExceededException has an error message that includes the account ID, stream name, and shard ID. An ErrorCode value of InternalFailure has the error message "Internal Service Failure".
    511      */
    512     ErrorMessage?: ErrorMessage;
    513   }
    514   export type PutRecordsResultEntryList = PutRecordsResultEntry[];
    515   export interface Record {
    516     /**
    517      * The unique identifier of the record in the stream.
    518      */
    519     SequenceNumber: SequenceNumber;
    520     /**
    521      * The approximate time that the record was inserted into the stream.
    522      */
    523     ApproximateArrivalTimestamp?: Timestamp;
    524     /**
    525      * The data blob. The data in the blob is both opaque and immutable to the Amazon Kinesis service, which does not inspect, interpret, or change the data in the blob in any way. When the data blob (the payload before base64-encoding) is added to the partition key size, the total size must not exceed the maximum record size (1 MB).
    526      */
    527     Data: Data;
    528     /**
    529      * Identifies which shard in the stream the data record is assigned to.
    530      */
    531     PartitionKey: PartitionKey;
    532   }
    533   export type RecordList = Record[];
    534   export interface RemoveTagsFromStreamInput {
    535     /**
    536      * The name of the stream.
    537      */
    538     StreamName: StreamName;
    539     /**
    540      * A list of tag keys. Each corresponding tag is removed from the stream.
    541      */
    542     TagKeys: TagKeyList;
    543   }
    544   export type ScalingType = "UNIFORM_SCALING"|string;
    545   export type SequenceNumber = string;
    546   export interface SequenceNumberRange {
    547     /**
    548      * The starting sequence number for the range.
    549      */
    550     StartingSequenceNumber: SequenceNumber;
    551     /**
    552      * The ending sequence number for the range. Shards that are in the OPEN state have an ending sequence number of null.
    553      */
    554     EndingSequenceNumber?: SequenceNumber;
    555   }
    556   export interface Shard {
    557     /**
    558      * The unique identifier of the shard within the stream.
    559      */
    560     ShardId: ShardId;
    561     /**
    562      * The shard ID of the shard's parent.
    563      */
    564     ParentShardId?: ShardId;
    565     /**
    566      * The shard ID of the shard adjacent to the shard's parent.
    567      */
    568     AdjacentParentShardId?: ShardId;
    569     /**
    570      * The range of possible hash key values for the shard, which is a set of ordered contiguous positive integers.
    571      */
    572     HashKeyRange: HashKeyRange;
    573     /**
    574      * The range of possible sequence numbers for the shard.
    575      */
    576     SequenceNumberRange: SequenceNumberRange;
    577   }
    578   export type ShardCountObject = number;
    579   export type ShardId = string;
    580   export type ShardIterator = string;
    581   export type ShardIteratorType = "AT_SEQUENCE_NUMBER"|"AFTER_SEQUENCE_NUMBER"|"TRIM_HORIZON"|"LATEST"|"AT_TIMESTAMP"|string;
    582   export type ShardList = Shard[];
    583   export interface SplitShardInput {
    584     /**
    585      * The name of the stream for the shard split.
    586      */
    587     StreamName: StreamName;
    588     /**
    589      * The shard ID of the shard to split.
    590      */
    591     ShardToSplit: ShardId;
    592     /**
    593      * A hash key value for the starting hash key of one of the child shards created by the split. The hash key range for a given shard constitutes a set of ordered contiguous positive integers. The value for NewStartingHashKey must be in the range of hash keys being mapped into the shard. The NewStartingHashKey hash key value and all higher hash key values in hash key range are distributed to one of the child shards. All the lower hash key values in the range are distributed to the other child shard.
    594      */
    595     NewStartingHashKey: HashKey;
    596   }
    597   export type StreamARN = string;
    598   export interface StreamDescription {
    599     /**
    600      * The name of the stream being described.
    601      */
    602     StreamName: StreamName;
    603     /**
    604      * The Amazon Resource Name (ARN) for the stream being described.
    605      */
    606     StreamARN: StreamARN;
    607     /**
    608      * The current status of the stream being described. The stream status is one of the following states:    CREATING - The stream is being created. Amazon Kinesis immediately returns and sets StreamStatus to CREATING.    DELETING - The stream is being deleted. The specified stream is in the DELETING state until Amazon Kinesis completes the deletion.    ACTIVE - The stream exists and is ready for read and write operations or deletion. You should perform read and write operations only on an ACTIVE stream.    UPDATING - Shards in the stream are being merged or split. Read and write operations continue to work while the stream is in the UPDATING state.  
    609      */
    610     StreamStatus: StreamStatus;
    611     /**
    612      * The shards that comprise the stream.
    613      */
    614     Shards: ShardList;
    615     /**
    616      * If set to true, more shards in the stream are available to describe.
    617      */
    618     HasMoreShards: BooleanObject;
    619     /**
    620      * The current retention period, in hours.
    621      */
    622     RetentionPeriodHours: PositiveIntegerObject;
    623     /**
    624      * The approximate time that the stream was created.
    625      */
    626     StreamCreationTimestamp: Timestamp;
    627     /**
    628      * Represents the current enhanced monitoring settings of the stream.
    629      */
    630     EnhancedMonitoring: EnhancedMonitoringList;
    631   }
    632   export type StreamName = string;
    633   export type StreamNameList = StreamName[];
    634   export type StreamStatus = "CREATING"|"DELETING"|"ACTIVE"|"UPDATING"|string;
    635   export interface Tag {
    636     /**
    637      * A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @
    638      */
    639     Key: TagKey;
    640     /**
    641      * An optional string, typically used to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @
    642      */
    643     Value?: TagValue;
    644   }
    645   export type TagKey = string;
    646   export type TagKeyList = TagKey[];
    647   export type TagList = Tag[];
    648   export type TagMap = {[key: string]: TagValue};
    649   export type TagValue = string;
    650   export type Timestamp = Date;
    651   export interface UpdateShardCountInput {
    652     /**
    653      * The name of the stream.
    654      */
    655     StreamName: StreamName;
    656     /**
    657      * The new number of shards.
    658      */
    659     TargetShardCount: PositiveIntegerObject;
    660     /**
    661      * The scaling type. Uniform scaling creates shards of equal size.
    662      */
    663     ScalingType: ScalingType;
    664   }
    665   export interface UpdateShardCountOutput {
    666     /**
    667      * The name of the stream.
    668      */
    669     StreamName?: StreamName;
    670     /**
    671      * The current number of shards.
    672      */
    673     CurrentShardCount?: PositiveIntegerObject;
    674     /**
    675      * The updated number of shards.
    676      */
    677     TargetShardCount?: PositiveIntegerObject;
    678   }
    679   /**
    680    * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
    681    */
    682   export type apiVersion = "2013-12-02"|"latest"|string;
    683   export interface ClientApiVersions {
    684     /**
    685      * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
    686      */
    687     apiVersion?: apiVersion;
    688   }
    689   export type ClientConfiguration = ServiceConfigurationOptions & ClientApiVersions;
    690 }
    691 export = Kinesis;