// ================================================================= // // * WARNING * // // This file is generated! // // Changes made to this file will be overwritten. If changes are // required to the generated code, the service_crategen project // must be updated to generate the changes. // // ================================================================= use std::error::Error; use std::fmt; use async_trait::async_trait; use rusoto_core::credential::ProvideAwsCredentials; use rusoto_core::region; use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest}; use rusoto_core::{Client, RusotoError}; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::proto::xml::error::*; use rusoto_core::proto::xml::util::{ self as xml_util, deserialize_elements, find_start_element, skip_tree, }; use rusoto_core::proto::xml::util::{Next, Peek, XmlParseError, XmlResponse}; use rusoto_core::request::HttpResponse; use rusoto_core::signature::SignedRequest; #[cfg(feature = "deserialize_structs")] use serde::Deserialize; #[cfg(feature = "serialize_structs")] use serde::Serialize; use serde_urlencoded; use std::str::FromStr; use xml::EventReader; impl RedshiftClient { fn new_params(&self, operation_name: &str) -> Params { let mut params = Params::new(); params.put("Action", operation_name); params.put("Version", "2012-12-01"); params } async fn sign_and_dispatch( &self, request: SignedRequest, from_response: fn(BufferedHttpResponse) -> RusotoError, ) -> Result> { let mut response = self.client.sign_and_dispatch(request).await?; if !response.status.is_success() { let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?; return Err(from_response(response)); } Ok(response) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct AcceptReservedNodeExchangeInputMessage { ///

A string representing the node identifier of the DC1 Reserved Node to be exchanged.

pub reserved_node_id: String, ///

The unique identifier of the DC2 Reserved Node offering to be used for the exchange. You can obtain the value for the parameter by calling GetReservedNodeExchangeOfferings

pub target_reserved_node_offering_id: String, } /// Serialize `AcceptReservedNodeExchangeInputMessage` contents to a `SignedRequest`. struct AcceptReservedNodeExchangeInputMessageSerializer; impl AcceptReservedNodeExchangeInputMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &AcceptReservedNodeExchangeInputMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ReservedNodeId"), &obj.reserved_node_id, ); params.put( &format!("{}{}", prefix, "TargetReservedNodeOfferingId"), &obj.target_reserved_node_offering_id, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct AcceptReservedNodeExchangeOutputMessage { ///

pub exchanged_reserved_node: Option, } #[allow(dead_code)] struct AcceptReservedNodeExchangeOutputMessageDeserializer; impl AcceptReservedNodeExchangeOutputMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, AcceptReservedNodeExchangeOutputMessage, _>( tag_name, stack, |name, stack, obj| { match name { "ExchangedReservedNode" => { obj.exchanged_reserved_node = Some(ReservedNodeDeserializer::deserialize( "ExchangedReservedNode", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

A name value pair that describes an aspect of an account.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct AccountAttribute { ///

The name of the attribute.

pub attribute_name: Option, ///

A list of attribute values.

pub attribute_values: Option>, } #[allow(dead_code)] struct AccountAttributeDeserializer; impl AccountAttributeDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, AccountAttribute, _>(tag_name, stack, |name, stack, obj| { match name { "AttributeName" => { obj.attribute_name = Some(StringDeserializer::deserialize("AttributeName", stack)?); } "AttributeValues" => { obj.attribute_values.get_or_insert(vec![]).extend( AttributeValueListDeserializer::deserialize("AttributeValues", stack)?, ); } _ => skip_tree(stack), } Ok(()) }) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct AccountAttributeList { ///

A list of attributes assigned to an account.

pub account_attributes: Option>, } #[allow(dead_code)] struct AccountAttributeListDeserializer; impl AccountAttributeListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, AccountAttributeList, _>(tag_name, stack, |name, stack, obj| { match name { "AccountAttributes" => { obj.account_attributes.get_or_insert(vec![]).extend( AttributeListDeserializer::deserialize("AccountAttributes", stack)?, ); } _ => skip_tree(stack), } Ok(()) }) } } ///

Describes an AWS customer account authorized to restore a snapshot.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct AccountWithRestoreAccess { ///

The identifier of an AWS support account authorized to restore a snapshot. For AWS support, the identifier is amazon-redshift-support.

pub account_alias: Option, ///

The identifier of an AWS customer account authorized to restore a snapshot.

pub account_id: Option, } #[allow(dead_code)] struct AccountWithRestoreAccessDeserializer; impl AccountWithRestoreAccessDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, AccountWithRestoreAccess, _>( tag_name, stack, |name, stack, obj| { match name { "AccountAlias" => { obj.account_alias = Some(StringDeserializer::deserialize("AccountAlias", stack)?); } "AccountId" => { obj.account_id = Some(StringDeserializer::deserialize("AccountId", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[allow(dead_code)] struct AccountsWithRestoreAccessListDeserializer; impl AccountsWithRestoreAccessListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "AccountWithRestoreAccess" { obj.push(AccountWithRestoreAccessDeserializer::deserialize( "AccountWithRestoreAccess", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } #[allow(dead_code)] struct AssociatedClusterListDeserializer; impl AssociatedClusterListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ClusterAssociatedToSchedule" { obj.push(ClusterAssociatedToScheduleDeserializer::deserialize( "ClusterAssociatedToSchedule", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } #[allow(dead_code)] struct AttributeListDeserializer; impl AttributeListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "AccountAttribute" { obj.push(AccountAttributeDeserializer::deserialize( "AccountAttribute", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } /// 
Serialize `AttributeNameList` contents to a `SignedRequest`. struct AttributeNameListSerializer; impl AttributeNameListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } #[allow(dead_code)] struct AttributeValueListDeserializer; impl AttributeValueListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "AttributeValueTarget" { obj.push(AttributeValueTargetDeserializer::deserialize( "AttributeValueTarget", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

Describes an attribute value.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct AttributeValueTarget { ///

The value of the attribute.

pub attribute_value: Option, } #[allow(dead_code)] struct AttributeValueTargetDeserializer; impl AttributeValueTargetDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, AttributeValueTarget, _>(tag_name, stack, |name, stack, obj| { match name { "AttributeValue" => { obj.attribute_value = Some(StringDeserializer::deserialize("AttributeValue", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct AuthorizeClusterSecurityGroupIngressMessage { ///

The IP range to be added the Amazon Redshift security group.

pub cidrip: Option, ///

The name of the security group to which the ingress rule is added.

pub cluster_security_group_name: String, ///

The EC2 security group to be added the Amazon Redshift security group.

pub ec2_security_group_name: Option, ///

The AWS account number of the owner of the security group specified by the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value.

Example: 111122223333

pub ec2_security_group_owner_id: Option, } /// Serialize `AuthorizeClusterSecurityGroupIngressMessage` contents to a `SignedRequest`. struct AuthorizeClusterSecurityGroupIngressMessageSerializer; impl AuthorizeClusterSecurityGroupIngressMessageSerializer { fn serialize( params: &mut Params, name: &str, obj: &AuthorizeClusterSecurityGroupIngressMessage, ) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.cidrip { params.put(&format!("{}{}", prefix, "CIDRIP"), &field_value); } params.put( &format!("{}{}", prefix, "ClusterSecurityGroupName"), &obj.cluster_security_group_name, ); if let Some(ref field_value) = obj.ec2_security_group_name { params.put( &format!("{}{}", prefix, "EC2SecurityGroupName"), &field_value, ); } if let Some(ref field_value) = obj.ec2_security_group_owner_id { params.put( &format!("{}{}", prefix, "EC2SecurityGroupOwnerId"), &field_value, ); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct AuthorizeClusterSecurityGroupIngressResult { pub cluster_security_group: Option, } #[allow(dead_code)] struct AuthorizeClusterSecurityGroupIngressResultDeserializer; impl AuthorizeClusterSecurityGroupIngressResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, AuthorizeClusterSecurityGroupIngressResult, _>( tag_name, stack, |name, stack, obj| { match name { "ClusterSecurityGroup" => { obj.cluster_security_group = Some(ClusterSecurityGroupDeserializer::deserialize( "ClusterSecurityGroup", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct AuthorizeSnapshotAccessMessage { ///

The identifier of the AWS customer account authorized to restore the specified snapshot.

To share a snapshot with AWS support, specify amazon-redshift-support.

pub account_with_restore_access: String, ///

The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

pub snapshot_cluster_identifier: Option, ///

The identifier of the snapshot the account is authorized to restore.

pub snapshot_identifier: String, } /// Serialize `AuthorizeSnapshotAccessMessage` contents to a `SignedRequest`. struct AuthorizeSnapshotAccessMessageSerializer; impl AuthorizeSnapshotAccessMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &AuthorizeSnapshotAccessMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "AccountWithRestoreAccess"), &obj.account_with_restore_access, ); if let Some(ref field_value) = obj.snapshot_cluster_identifier { params.put( &format!("{}{}", prefix, "SnapshotClusterIdentifier"), &field_value, ); } params.put( &format!("{}{}", prefix, "SnapshotIdentifier"), &obj.snapshot_identifier, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct AuthorizeSnapshotAccessResult { pub snapshot: Option, } #[allow(dead_code)] struct AuthorizeSnapshotAccessResultDeserializer; impl AuthorizeSnapshotAccessResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, AuthorizeSnapshotAccessResult, _>( tag_name, stack, |name, stack, obj| { match name { "Snapshot" => { obj.snapshot = Some(SnapshotDeserializer::deserialize("Snapshot", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

Describes an availability zone.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct AvailabilityZone { ///

The name of the availability zone.

pub name: Option, ///

pub supported_platforms: Option>, } #[allow(dead_code)] struct AvailabilityZoneDeserializer; impl AvailabilityZoneDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, AvailabilityZone, _>(tag_name, stack, |name, stack, obj| { match name { "Name" => { obj.name = Some(StringDeserializer::deserialize("Name", stack)?); } "SupportedPlatforms" => { obj.supported_platforms.get_or_insert(vec![]).extend( SupportedPlatformsListDeserializer::deserialize( "SupportedPlatforms", stack, )?, ); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct AvailabilityZoneListDeserializer; impl AvailabilityZoneListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "AvailabilityZone" { obj.push(AvailabilityZoneDeserializer::deserialize( "AvailabilityZone", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct BatchDeleteClusterSnapshotsRequest { ///

A list of identifiers for the snapshots that you want to delete.

pub identifiers: Vec, } /// Serialize `BatchDeleteClusterSnapshotsRequest` contents to a `SignedRequest`. struct BatchDeleteClusterSnapshotsRequestSerializer; impl BatchDeleteClusterSnapshotsRequestSerializer { fn serialize(params: &mut Params, name: &str, obj: &BatchDeleteClusterSnapshotsRequest) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } DeleteClusterSnapshotMessageListSerializer::serialize( params, &format!("{}{}", prefix, "DeleteClusterSnapshotMessage"), &obj.identifiers, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct BatchDeleteClusterSnapshotsResult { ///

A list of any errors returned.

pub errors: Option>, ///

A list of the snapshot identifiers that were deleted.

pub resources: Option>, } #[allow(dead_code)] struct BatchDeleteClusterSnapshotsResultDeserializer; impl BatchDeleteClusterSnapshotsResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, BatchDeleteClusterSnapshotsResult, _>( tag_name, stack, |name, stack, obj| { match name { "Errors" => { obj.errors.get_or_insert(vec![]).extend( BatchSnapshotOperationErrorListDeserializer::deserialize( "Errors", stack, )?, ); } "Resources" => { obj.resources.get_or_insert(vec![]).extend( SnapshotIdentifierListDeserializer::deserialize("Resources", stack)?, ); } _ => skip_tree(stack), } Ok(()) }, ) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct BatchModifyClusterSnapshotsMessage { ///

A boolean value indicating whether to override an exception if the retention period has passed.

pub force: Option, ///

The number of days that a manual snapshot is retained. If you specify the value -1, the manual snapshot is retained indefinitely.

The number must be either -1 or an integer between 1 and 3,653.

If you decrease the manual snapshot retention period from its current value, existing manual snapshots that fall outside of the new retention period will return an error. If you want to suppress the errors and delete the snapshots, use the force option.

pub manual_snapshot_retention_period: Option, ///

A list of snapshot identifiers you want to modify.

pub snapshot_identifier_list: Vec, } /// Serialize `BatchModifyClusterSnapshotsMessage` contents to a `SignedRequest`. struct BatchModifyClusterSnapshotsMessageSerializer; impl BatchModifyClusterSnapshotsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &BatchModifyClusterSnapshotsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.force { params.put(&format!("{}{}", prefix, "Force"), &field_value); } if let Some(ref field_value) = obj.manual_snapshot_retention_period { params.put( &format!("{}{}", prefix, "ManualSnapshotRetentionPeriod"), &field_value, ); } SnapshotIdentifierListSerializer::serialize( params, &format!("{}{}", prefix, "String"), &obj.snapshot_identifier_list, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct BatchModifyClusterSnapshotsOutputMessage { ///

A list of any errors returned.

pub errors: Option>, ///

A list of the snapshots that were modified.

pub resources: Option>, } #[allow(dead_code)] struct BatchModifyClusterSnapshotsOutputMessageDeserializer; impl BatchModifyClusterSnapshotsOutputMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, BatchModifyClusterSnapshotsOutputMessage, _>( tag_name, stack, |name, stack, obj| { match name { "Errors" => { obj.errors.get_or_insert(vec![]).extend( BatchSnapshotOperationErrorsDeserializer::deserialize("Errors", stack)?, ); } "Resources" => { obj.resources.get_or_insert(vec![]).extend( SnapshotIdentifierListDeserializer::deserialize("Resources", stack)?, ); } _ => skip_tree(stack), } Ok(()) }, ) } } #[allow(dead_code)] struct BatchSnapshotOperationErrorListDeserializer; impl BatchSnapshotOperationErrorListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "SnapshotErrorMessage" { obj.push(SnapshotErrorMessageDeserializer::deserialize( "SnapshotErrorMessage", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } #[allow(dead_code)] struct BatchSnapshotOperationErrorsDeserializer; impl BatchSnapshotOperationErrorsDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "SnapshotErrorMessage" { obj.push(SnapshotErrorMessageDeserializer::deserialize( "SnapshotErrorMessage", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } #[allow(dead_code)] struct BooleanDeserializer; impl BooleanDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, |s| Ok(bool::from_str(&s).unwrap())) } } #[allow(dead_code)] struct BooleanOptionalDeserializer; impl 
BooleanOptionalDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, |s| Ok(bool::from_str(&s).unwrap())) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CancelResizeMessage { ///

The unique identifier for the cluster that you want to cancel a resize operation for.

pub cluster_identifier: String, } /// Serialize `CancelResizeMessage` contents to a `SignedRequest`. struct CancelResizeMessageSerializer; impl CancelResizeMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CancelResizeMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); } } ///

Describes a cluster.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct Cluster { ///

A boolean value that, if true, indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.

pub allow_version_upgrade: Option, ///

The number of days that automatic cluster snapshots are retained.

pub automated_snapshot_retention_period: Option, ///

The name of the Availability Zone in which the cluster is located.

pub availability_zone: Option, ///

The availability status of the cluster for queries. Possible values are the following:

  • Available - The cluster is available for queries.

  • Unavailable - The cluster is not available for queries.

  • Maintenance - The cluster is intermittently available for queries due to maintenance activities.

  • Modifying - The cluster is intermittently available for queries due to changes that modify the cluster.

  • Failed - The cluster failed and is not available for queries.

pub cluster_availability_status: Option, ///

The date and time that the cluster was created.

pub cluster_create_time: Option, ///

The unique identifier of the cluster.

pub cluster_identifier: Option, ///

The nodes in the cluster.

pub cluster_nodes: Option>, ///

The list of cluster parameter groups that are associated with this cluster. Each parameter group in the list is returned with its status.

pub cluster_parameter_groups: Option>, ///

The public key for the cluster.

pub cluster_public_key: Option, ///

The specific revision number of the database in the cluster.

pub cluster_revision_number: Option, ///

A list of cluster security group that are associated with the cluster. Each security group is represented by an element that contains ClusterSecurityGroup.Name and ClusterSecurityGroup.Status subelements.

Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud (VPC). Clusters that are created in a VPC use VPC security groups, which are listed by the VpcSecurityGroups parameter.

pub cluster_security_groups: Option>, ///

A value that returns the destination region and retention period that are configured for cross-region snapshot copy.

pub cluster_snapshot_copy_status: Option, ///

The current state of the cluster. Possible values are the following:

  • available

  • available, prep-for-resize

  • available, resize-cleanup

  • cancelling-resize

  • creating

  • deleting

  • final-snapshot

  • hardware-failure

  • incompatible-hsm

  • incompatible-network

  • incompatible-parameters

  • incompatible-restore

  • modifying

  • paused

  • rebooting

  • renaming

  • resizing

  • rotating-keys

  • storage-full

  • updating-hsm

pub cluster_status: Option, ///

The name of the subnet group that is associated with the cluster. This parameter is valid only when the cluster is in a VPC.

pub cluster_subnet_group_name: Option, ///

The version ID of the Amazon Redshift engine that is running on the cluster.

pub cluster_version: Option, ///

The name of the initial database that was created when the cluster was created. This same name is returned for the life of the cluster. If an initial database was not specified, a database named devdev was created by default.

pub db_name: Option, ///

pub data_transfer_progress: Option, ///

Describes a group of DeferredMaintenanceWindow objects.

pub deferred_maintenance_windows: Option>, ///

The status of the elastic IP (EIP) address.

pub elastic_ip_status: Option, ///

The number of nodes that you can resize the cluster to with the elastic resize method.

pub elastic_resize_number_of_node_options: Option, ///

A boolean value that, if true, indicates that data in the cluster is encrypted at rest.

pub encrypted: Option, ///

The connection endpoint.

pub endpoint: Option, ///

An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.

If this option is true, enhanced VPC routing is enabled.

Default: false

pub enhanced_vpc_routing: Option, ///

The date and time when the next snapshot is expected to be taken for clusters with a valid snapshot schedule and backups enabled.

pub expected_next_snapshot_schedule_time: Option, ///

The status of next expected snapshot for clusters having a valid snapshot schedule and backups enabled. Possible values are the following:

  • OnTrack - The next snapshot is expected to be taken on time.

  • Pending - The next snapshot is pending to be taken.

pub expected_next_snapshot_schedule_time_status: Option, ///

A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.

Values: active, applying

pub hsm_status: Option, ///

A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.

pub iam_roles: Option>, ///

The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.

pub kms_key_id: Option, ///

The name of the maintenance track for the cluster.

pub maintenance_track_name: Option, ///

The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn't change the retention period of existing snapshots.

The value must be either -1 or an integer between 1 and 3,653.

pub manual_snapshot_retention_period: Option, ///

The master user name for the cluster. This name is used to connect to the database that is specified in the DBName parameter.

pub master_username: Option, ///

The status of a modify operation, if any, initiated for the cluster.

pub modify_status: Option, ///

The date and time in UTC when system maintenance can begin.

pub next_maintenance_window_start_time: Option, ///

The node type for the nodes in the cluster.

pub node_type: Option, ///

The number of compute nodes in the cluster.

pub number_of_nodes: Option, ///

Cluster operations that are waiting to be started.

pub pending_actions: Option>, ///

A value that, if present, indicates that changes to the cluster are pending. Specific pending changes are identified by subelements.

pub pending_modified_values: Option, ///

The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.

pub preferred_maintenance_window: Option, ///

A boolean value that, if true, indicates that the cluster can be accessed from a public network.

pub publicly_accessible: Option, ///

Returns the following:

  • AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.

  • ResizeType: Returns ClassicResize

pub resize_info: Option, ///

A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.

pub restore_status: Option, ///

A unique identifier for the cluster snapshot schedule.

pub snapshot_schedule_identifier: Option, ///

The current state of the cluster snapshot schedule.

pub snapshot_schedule_state: Option, ///

The list of tags for the cluster.

pub tags: Option>, ///

The identifier of the VPC the cluster is in, if the cluster is in a VPC.

pub vpc_id: Option, ///

A list of Amazon Virtual Private Cloud (Amazon VPC) security groups that are associated with the cluster. This parameter is returned only if the cluster is in a VPC.

pub vpc_security_groups: Option>, } #[allow(dead_code)] struct ClusterDeserializer; impl ClusterDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, Cluster, _>(tag_name, stack, |name, stack, obj| { match name { "AllowVersionUpgrade" => { obj.allow_version_upgrade = Some(BooleanDeserializer::deserialize( "AllowVersionUpgrade", stack, )?); } "AutomatedSnapshotRetentionPeriod" => { obj.automated_snapshot_retention_period = Some(IntegerDeserializer::deserialize( "AutomatedSnapshotRetentionPeriod", stack, )?); } "AvailabilityZone" => { obj.availability_zone = Some(StringDeserializer::deserialize("AvailabilityZone", stack)?); } "ClusterAvailabilityStatus" => { obj.cluster_availability_status = Some(StringDeserializer::deserialize( "ClusterAvailabilityStatus", stack, )?); } "ClusterCreateTime" => { obj.cluster_create_time = Some(TStampDeserializer::deserialize("ClusterCreateTime", stack)?); } "ClusterIdentifier" => { obj.cluster_identifier = Some(StringDeserializer::deserialize("ClusterIdentifier", stack)?); } "ClusterNodes" => { obj.cluster_nodes.get_or_insert(vec![]).extend( ClusterNodesListDeserializer::deserialize("ClusterNodes", stack)?, ); } "ClusterParameterGroups" => { obj.cluster_parameter_groups.get_or_insert(vec![]).extend( ClusterParameterGroupStatusListDeserializer::deserialize( "ClusterParameterGroups", stack, )?, ); } "ClusterPublicKey" => { obj.cluster_public_key = Some(StringDeserializer::deserialize("ClusterPublicKey", stack)?); } "ClusterRevisionNumber" => { obj.cluster_revision_number = Some(StringDeserializer::deserialize( "ClusterRevisionNumber", stack, )?); } "ClusterSecurityGroups" => { obj.cluster_security_groups.get_or_insert(vec![]).extend( ClusterSecurityGroupMembershipListDeserializer::deserialize( "ClusterSecurityGroups", stack, )?, ); } "ClusterSnapshotCopyStatus" => { obj.cluster_snapshot_copy_status = 
Some(ClusterSnapshotCopyStatusDeserializer::deserialize( "ClusterSnapshotCopyStatus", stack, )?); } "ClusterStatus" => { obj.cluster_status = Some(StringDeserializer::deserialize("ClusterStatus", stack)?); } "ClusterSubnetGroupName" => { obj.cluster_subnet_group_name = Some(StringDeserializer::deserialize( "ClusterSubnetGroupName", stack, )?); } "ClusterVersion" => { obj.cluster_version = Some(StringDeserializer::deserialize("ClusterVersion", stack)?); } "DBName" => { obj.db_name = Some(StringDeserializer::deserialize("DBName", stack)?); } "DataTransferProgress" => { obj.data_transfer_progress = Some(DataTransferProgressDeserializer::deserialize( "DataTransferProgress", stack, )?); } "DeferredMaintenanceWindows" => { obj.deferred_maintenance_windows .get_or_insert(vec![]) .extend(DeferredMaintenanceWindowsListDeserializer::deserialize( "DeferredMaintenanceWindows", stack, )?); } "ElasticIpStatus" => { obj.elastic_ip_status = Some(ElasticIpStatusDeserializer::deserialize( "ElasticIpStatus", stack, )?); } "ElasticResizeNumberOfNodeOptions" => { obj.elastic_resize_number_of_node_options = Some( StringDeserializer::deserialize("ElasticResizeNumberOfNodeOptions", stack)?, ); } "Encrypted" => { obj.encrypted = Some(BooleanDeserializer::deserialize("Encrypted", stack)?); } "Endpoint" => { obj.endpoint = Some(EndpointDeserializer::deserialize("Endpoint", stack)?); } "EnhancedVpcRouting" => { obj.enhanced_vpc_routing = Some(BooleanDeserializer::deserialize( "EnhancedVpcRouting", stack, )?); } "ExpectedNextSnapshotScheduleTime" => { obj.expected_next_snapshot_schedule_time = Some( TStampDeserializer::deserialize("ExpectedNextSnapshotScheduleTime", stack)?, ); } "ExpectedNextSnapshotScheduleTimeStatus" => { obj.expected_next_snapshot_schedule_time_status = Some(StringDeserializer::deserialize( "ExpectedNextSnapshotScheduleTimeStatus", stack, )?); } "HsmStatus" => { obj.hsm_status = Some(HsmStatusDeserializer::deserialize("HsmStatus", stack)?); } "IamRoles" => { 
obj.iam_roles.get_or_insert(vec![]).extend( ClusterIamRoleListDeserializer::deserialize("IamRoles", stack)?, ); } "KmsKeyId" => { obj.kms_key_id = Some(StringDeserializer::deserialize("KmsKeyId", stack)?); } "MaintenanceTrackName" => { obj.maintenance_track_name = Some(StringDeserializer::deserialize( "MaintenanceTrackName", stack, )?); } "ManualSnapshotRetentionPeriod" => { obj.manual_snapshot_retention_period = Some(IntegerDeserializer::deserialize( "ManualSnapshotRetentionPeriod", stack, )?); } "MasterUsername" => { obj.master_username = Some(StringDeserializer::deserialize("MasterUsername", stack)?); } "ModifyStatus" => { obj.modify_status = Some(StringDeserializer::deserialize("ModifyStatus", stack)?); } "NextMaintenanceWindowStartTime" => { obj.next_maintenance_window_start_time = Some(TStampDeserializer::deserialize( "NextMaintenanceWindowStartTime", stack, )?); } "NodeType" => { obj.node_type = Some(StringDeserializer::deserialize("NodeType", stack)?); } "NumberOfNodes" => { obj.number_of_nodes = Some(IntegerDeserializer::deserialize("NumberOfNodes", stack)?); } "PendingActions" => { obj.pending_actions.get_or_insert(vec![]).extend( PendingActionsListDeserializer::deserialize("PendingActions", stack)?, ); } "PendingModifiedValues" => { obj.pending_modified_values = Some(PendingModifiedValuesDeserializer::deserialize( "PendingModifiedValues", stack, )?); } "PreferredMaintenanceWindow" => { obj.preferred_maintenance_window = Some(StringDeserializer::deserialize( "PreferredMaintenanceWindow", stack, )?); } "PubliclyAccessible" => { obj.publicly_accessible = Some(BooleanDeserializer::deserialize( "PubliclyAccessible", stack, )?); } "ResizeInfo" => { obj.resize_info = Some(ResizeInfoDeserializer::deserialize("ResizeInfo", stack)?); } "RestoreStatus" => { obj.restore_status = Some(RestoreStatusDeserializer::deserialize( "RestoreStatus", stack, )?); } "SnapshotScheduleIdentifier" => { obj.snapshot_schedule_identifier = Some(StringDeserializer::deserialize( 
"SnapshotScheduleIdentifier", stack, )?); } "SnapshotScheduleState" => { obj.snapshot_schedule_state = Some(ScheduleStateDeserializer::deserialize( "SnapshotScheduleState", stack, )?); } "Tags" => { obj.tags .get_or_insert(vec![]) .extend(TagListDeserializer::deserialize("Tags", stack)?); } "VpcId" => { obj.vpc_id = Some(StringDeserializer::deserialize("VpcId", stack)?); } "VpcSecurityGroups" => { obj.vpc_security_groups.get_or_insert(vec![]).extend( VpcSecurityGroupMembershipListDeserializer::deserialize( "VpcSecurityGroups", stack, )?, ); } _ => skip_tree(stack), } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
pub struct ClusterAssociatedToSchedule {
    /// <p>The unique identifier of the cluster.</p>
    pub cluster_identifier: Option<String>,
    /// <p>The association state of the cluster with the snapshot schedule.</p>
    pub schedule_association_state: Option<String>,
}
#[allow(dead_code)]
struct ClusterAssociatedToScheduleDeserializer;
impl ClusterAssociatedToScheduleDeserializer {
    /// Deserializes a `ClusterAssociatedToSchedule` element; unknown child
    /// elements are skipped for forward compatibility.
    #[allow(dead_code, unused_variables)]
    fn deserialize<T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ClusterAssociatedToSchedule, XmlParseError> {
        deserialize_elements::<_, ClusterAssociatedToSchedule, _>(
            tag_name,
            stack,
            |name, stack, obj| {
                match name {
                    "ClusterIdentifier" => obj.cluster_identifier = Some(StringDeserializer::deserialize("ClusterIdentifier", stack)?),
                    "ScheduleAssociationState" => obj.schedule_association_state = Some(ScheduleStateDeserializer::deserialize("ScheduleAssociationState", stack)?),
                    _ => skip_tree(stack),
                }
                Ok(())
            },
        )
    }
}
///

Temporary credentials with authorization to log on to an Amazon Redshift database.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterCredentials { ///

A temporary password that authorizes the user name returned by DbUser to log on to the database DbName.

pub db_password: Option, ///

A database user name that is authorized to log on to the database DbName using the password DbPassword. If the specified DbUser exists in the database, the new user name has the same database privileges as the the user named in DbUser. By default, the user is added to PUBLIC. If the DbGroups parameter is specifed, DbUser is added to the listed groups for any sessions created using these credentials.

pub db_user: Option, ///

The date and time the password in DbPassword expires.

pub expiration: Option, } #[allow(dead_code)] struct ClusterCredentialsDeserializer; impl ClusterCredentialsDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterCredentials, _>(tag_name, stack, |name, stack, obj| { match name { "DbPassword" => { obj.db_password = Some(SensitiveStringDeserializer::deserialize( "DbPassword", stack, )?); } "DbUser" => { obj.db_user = Some(StringDeserializer::deserialize("DbUser", stack)?); } "Expiration" => { obj.expiration = Some(TStampDeserializer::deserialize("Expiration", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

Describes a ClusterDbRevision.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterDbRevision { ///

The unique identifier of the cluster.

pub cluster_identifier: Option, ///

A string representing the current cluster version.

pub current_database_revision: Option, ///

The date on which the database revision was released.

pub database_revision_release_date: Option, ///

A list of RevisionTarget objects, where each object describes the database revision that a cluster can be updated to.

pub revision_targets: Option>, } #[allow(dead_code)] struct ClusterDbRevisionDeserializer; impl ClusterDbRevisionDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterDbRevision, _>(tag_name, stack, |name, stack, obj| { match name { "ClusterIdentifier" => { obj.cluster_identifier = Some(StringDeserializer::deserialize("ClusterIdentifier", stack)?); } "CurrentDatabaseRevision" => { obj.current_database_revision = Some(StringDeserializer::deserialize( "CurrentDatabaseRevision", stack, )?); } "DatabaseRevisionReleaseDate" => { obj.database_revision_release_date = Some(TStampDeserializer::deserialize( "DatabaseRevisionReleaseDate", stack, )?); } "RevisionTargets" => { obj.revision_targets.get_or_insert(vec![]).extend( RevisionTargetsListDeserializer::deserialize("RevisionTargets", stack)?, ); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct ClusterDbRevisionsListDeserializer; impl ClusterDbRevisionsListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ClusterDbRevision" { obj.push(ClusterDbRevisionDeserializer::deserialize( "ClusterDbRevision", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterDbRevisionsMessage { ///

A list of revisions.

pub cluster_db_revisions: Option>, ///

A string representing the starting point for the next set of revisions. If a value is returned in a response, you can retrieve the next set of revisions by providing the value in the marker parameter and retrying the command. If the marker field is empty, all revisions have already been returned.

pub marker: Option, } #[allow(dead_code)] struct ClusterDbRevisionsMessageDeserializer; impl ClusterDbRevisionsMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterDbRevisionsMessage, _>( tag_name, stack, |name, stack, obj| { match name { "ClusterDbRevisions" => { obj.cluster_db_revisions.get_or_insert(vec![]).extend( ClusterDbRevisionsListDeserializer::deserialize( "ClusterDbRevisions", stack, )?, ); } "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterIamRole { ///

A value that describes the status of the IAM role's association with an Amazon Redshift cluster.

The following are possible statuses and descriptions.

  • in-sync: The role is available for use by the cluster.

  • adding: The role is in the process of being associated with the cluster.

  • removing: The role is in the process of being disassociated with the cluster.

pub apply_status: Option, ///

The Amazon Resource Name (ARN) of the IAM role, for example, arn:aws:iam::123456789012:role/RedshiftCopyUnload.

pub iam_role_arn: Option, } #[allow(dead_code)] struct ClusterIamRoleDeserializer; impl ClusterIamRoleDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterIamRole, _>(tag_name, stack, |name, stack, obj| { match name { "ApplyStatus" => { obj.apply_status = Some(StringDeserializer::deserialize("ApplyStatus", stack)?); } "IamRoleArn" => { obj.iam_role_arn = Some(StringDeserializer::deserialize("IamRoleArn", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct ClusterIamRoleListDeserializer; impl ClusterIamRoleListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ClusterIamRole" { obj.push(ClusterIamRoleDeserializer::deserialize( "ClusterIamRole", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } #[allow(dead_code)] struct ClusterListDeserializer; impl ClusterListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "Cluster" { obj.push(ClusterDeserializer::deserialize("Cluster", stack)?); } else { skip_tree(stack); } Ok(()) }) } } ///

The identifier of a node in a cluster.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterNode { ///

Whether the node is a leader node or a compute node.

pub node_role: Option, ///

The private IP address of a node within a cluster.

pub private_ip_address: Option, ///

The public IP address of a node within a cluster.

pub public_ip_address: Option, } #[allow(dead_code)] struct ClusterNodeDeserializer; impl ClusterNodeDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterNode, _>(tag_name, stack, |name, stack, obj| { match name { "NodeRole" => { obj.node_role = Some(StringDeserializer::deserialize("NodeRole", stack)?); } "PrivateIPAddress" => { obj.private_ip_address = Some(StringDeserializer::deserialize("PrivateIPAddress", stack)?); } "PublicIPAddress" => { obj.public_ip_address = Some(StringDeserializer::deserialize("PublicIPAddress", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct ClusterNodesListDeserializer; impl ClusterNodesListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "member" { obj.push(ClusterNodeDeserializer::deserialize("member", stack)?); } else { skip_tree(stack); } Ok(()) }) } } ///

Describes a parameter group.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterParameterGroup { ///

The description of the parameter group.

pub description: Option, ///

The name of the cluster parameter group family that this cluster parameter group is compatible with.

pub parameter_group_family: Option, ///

The name of the cluster parameter group.

pub parameter_group_name: Option, ///

The list of tags for the cluster parameter group.

pub tags: Option>, } #[allow(dead_code)] struct ClusterParameterGroupDeserializer; impl ClusterParameterGroupDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterParameterGroup, _>(tag_name, stack, |name, stack, obj| { match name { "Description" => { obj.description = Some(StringDeserializer::deserialize("Description", stack)?); } "ParameterGroupFamily" => { obj.parameter_group_family = Some(StringDeserializer::deserialize( "ParameterGroupFamily", stack, )?); } "ParameterGroupName" => { obj.parameter_group_name = Some(StringDeserializer::deserialize( "ParameterGroupName", stack, )?); } "Tags" => { obj.tags .get_or_insert(vec![]) .extend(TagListDeserializer::deserialize("Tags", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

Contains the output from the DescribeClusterParameters action.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterParameterGroupDetails { ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, ///

A list of Parameter instances. Each instance lists the parameters of one cluster parameter group.

pub parameters: Option>, } #[allow(dead_code)] struct ClusterParameterGroupDetailsDeserializer; impl ClusterParameterGroupDetailsDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterParameterGroupDetails, _>( tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "Parameters" => { obj.parameters.get_or_insert(vec![]).extend( ParametersListDeserializer::deserialize("Parameters", stack)?, ); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterParameterGroupNameMessage { ///

The name of the cluster parameter group.

pub parameter_group_name: Option, ///

The status of the parameter group. For example, if you made a change to a parameter group name-value pair, then the change could be pending a reboot of an associated cluster.

pub parameter_group_status: Option, } #[allow(dead_code)] struct ClusterParameterGroupNameMessageDeserializer; impl ClusterParameterGroupNameMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterParameterGroupNameMessage, _>( tag_name, stack, |name, stack, obj| { match name { "ParameterGroupName" => { obj.parameter_group_name = Some(StringDeserializer::deserialize( "ParameterGroupName", stack, )?); } "ParameterGroupStatus" => { obj.parameter_group_status = Some(StringDeserializer::deserialize( "ParameterGroupStatus", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

Describes the status of a parameter group.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterParameterGroupStatus { ///

The list of parameter statuses.

For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

pub cluster_parameter_status_list: Option>, ///

The status of parameter updates.

pub parameter_apply_status: Option, ///

The name of the cluster parameter group.

pub parameter_group_name: Option, } #[allow(dead_code)] struct ClusterParameterGroupStatusDeserializer; impl ClusterParameterGroupStatusDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterParameterGroupStatus, _>( tag_name, stack, |name, stack, obj| { match name { "ClusterParameterStatusList" => { obj.cluster_parameter_status_list .get_or_insert(vec![]) .extend(ClusterParameterStatusListDeserializer::deserialize( "ClusterParameterStatusList", stack, )?); } "ParameterApplyStatus" => { obj.parameter_apply_status = Some(StringDeserializer::deserialize( "ParameterApplyStatus", stack, )?); } "ParameterGroupName" => { obj.parameter_group_name = Some(StringDeserializer::deserialize( "ParameterGroupName", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[allow(dead_code)] struct ClusterParameterGroupStatusListDeserializer; impl ClusterParameterGroupStatusListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ClusterParameterGroup" { obj.push(ClusterParameterGroupStatusDeserializer::deserialize( "ClusterParameterGroup", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

Contains the output from the DescribeClusterParameterGroups action.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterParameterGroupsMessage { ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, ///

A list of ClusterParameterGroup instances. Each instance describes one cluster parameter group.

pub parameter_groups: Option>, } #[allow(dead_code)] struct ClusterParameterGroupsMessageDeserializer; impl ClusterParameterGroupsMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterParameterGroupsMessage, _>( tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "ParameterGroups" => { obj.parameter_groups.get_or_insert(vec![]).extend( ParameterGroupListDeserializer::deserialize("ParameterGroups", stack)?, ); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

Describes the status of a parameter group.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterParameterStatus { ///

The error that prevented the parameter from being applied to the database.

pub parameter_apply_error_description: Option, ///

The status of the parameter that indicates whether the parameter is in sync with the database, waiting for a cluster reboot, or encountered an error when being applied.

The following are possible statuses and descriptions.

  • in-sync: The parameter value is in sync with the database.

  • pending-reboot: The parameter value will be applied after the cluster reboots.

  • applying: The parameter value is being applied to the database.

  • invalid-parameter: Cannot apply the parameter value because it has an invalid value or syntax.

  • apply-deferred: The parameter contains static property changes. The changes are deferred until the cluster reboots.

  • apply-error: Cannot connect to the cluster. The parameter change will be applied after the cluster reboots.

  • unknown-error: Cannot apply the parameter change right now. The change will be applied after the cluster reboots.

pub parameter_apply_status: Option, ///

The name of the parameter.

pub parameter_name: Option, } #[allow(dead_code)] struct ClusterParameterStatusDeserializer; impl ClusterParameterStatusDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterParameterStatus, _>(tag_name, stack, |name, stack, obj| { match name { "ParameterApplyErrorDescription" => { obj.parameter_apply_error_description = Some(StringDeserializer::deserialize( "ParameterApplyErrorDescription", stack, )?); } "ParameterApplyStatus" => { obj.parameter_apply_status = Some(StringDeserializer::deserialize( "ParameterApplyStatus", stack, )?); } "ParameterName" => { obj.parameter_name = Some(StringDeserializer::deserialize("ParameterName", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct ClusterParameterStatusListDeserializer; impl ClusterParameterStatusListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "member" { obj.push(ClusterParameterStatusDeserializer::deserialize( "member", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

Describes a security group.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterSecurityGroup { ///

The name of the cluster security group to which the operation was applied.

pub cluster_security_group_name: Option, ///

A description of the security group.

pub description: Option, ///

A list of EC2 security groups that are permitted to access clusters associated with this cluster security group.

pub ec2_security_groups: Option>, ///

A list of IP ranges (CIDR blocks) that are permitted to access clusters associated with this cluster security group.

pub ip_ranges: Option>, ///

The list of tags for the cluster security group.

pub tags: Option>, } #[allow(dead_code)] struct ClusterSecurityGroupDeserializer; impl ClusterSecurityGroupDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterSecurityGroup, _>(tag_name, stack, |name, stack, obj| { match name { "ClusterSecurityGroupName" => { obj.cluster_security_group_name = Some(StringDeserializer::deserialize( "ClusterSecurityGroupName", stack, )?); } "Description" => { obj.description = Some(StringDeserializer::deserialize("Description", stack)?); } "EC2SecurityGroups" => { obj.ec2_security_groups.get_or_insert(vec![]).extend( EC2SecurityGroupListDeserializer::deserialize("EC2SecurityGroups", stack)?, ); } "IPRanges" => { obj.ip_ranges .get_or_insert(vec![]) .extend(IPRangeListDeserializer::deserialize("IPRanges", stack)?); } "Tags" => { obj.tags .get_or_insert(vec![]) .extend(TagListDeserializer::deserialize("Tags", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

Describes a cluster security group.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterSecurityGroupMembership { ///

The name of the cluster security group.

pub cluster_security_group_name: Option, ///

The status of the cluster security group.

pub status: Option, } #[allow(dead_code)] struct ClusterSecurityGroupMembershipDeserializer; impl ClusterSecurityGroupMembershipDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterSecurityGroupMembership, _>( tag_name, stack, |name, stack, obj| { match name { "ClusterSecurityGroupName" => { obj.cluster_security_group_name = Some(StringDeserializer::deserialize( "ClusterSecurityGroupName", stack, )?); } "Status" => { obj.status = Some(StringDeserializer::deserialize("Status", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[allow(dead_code)] struct ClusterSecurityGroupMembershipListDeserializer; impl ClusterSecurityGroupMembershipListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ClusterSecurityGroup" { obj.push(ClusterSecurityGroupMembershipDeserializer::deserialize( "ClusterSecurityGroup", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterSecurityGroupMessage { ///

A list of ClusterSecurityGroup instances.

pub cluster_security_groups: Option>, ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, } #[allow(dead_code)] struct ClusterSecurityGroupMessageDeserializer; impl ClusterSecurityGroupMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterSecurityGroupMessage, _>( tag_name, stack, |name, stack, obj| { match name { "ClusterSecurityGroups" => { obj.cluster_security_groups.get_or_insert(vec![]).extend( ClusterSecurityGroupsDeserializer::deserialize( "ClusterSecurityGroups", stack, )?, ); } "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } /// Serialize `ClusterSecurityGroupNameList` contents to a `SignedRequest`. struct ClusterSecurityGroupNameListSerializer; impl ClusterSecurityGroupNameListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } #[allow(dead_code)] struct ClusterSecurityGroupsDeserializer; impl ClusterSecurityGroupsDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ClusterSecurityGroup" { obj.push(ClusterSecurityGroupDeserializer::deserialize( "ClusterSecurityGroup", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

Returns the destination region and retention period that are configured for cross-region snapshot copy.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterSnapshotCopyStatus { ///

The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.

pub destination_region: Option, ///

The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.

The value must be either -1 or an integer between 1 and 3,653.

pub manual_snapshot_retention_period: Option, ///

The number of days that automated snapshots are retained in the destination region after they are copied from a source region.

pub retention_period: Option, ///

The name of the snapshot copy grant.

pub snapshot_copy_grant_name: Option, } #[allow(dead_code)] struct ClusterSnapshotCopyStatusDeserializer; impl ClusterSnapshotCopyStatusDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterSnapshotCopyStatus, _>( tag_name, stack, |name, stack, obj| { match name { "DestinationRegion" => { obj.destination_region = Some(StringDeserializer::deserialize("DestinationRegion", stack)?); } "ManualSnapshotRetentionPeriod" => { obj.manual_snapshot_retention_period = Some(IntegerDeserializer::deserialize( "ManualSnapshotRetentionPeriod", stack, )?); } "RetentionPeriod" => { obj.retention_period = Some(LongDeserializer::deserialize("RetentionPeriod", stack)?); } "SnapshotCopyGrantName" => { obj.snapshot_copy_grant_name = Some(StringDeserializer::deserialize( "SnapshotCopyGrantName", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

Describes a subnet group.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterSubnetGroup { ///

The name of the cluster subnet group.

pub cluster_subnet_group_name: Option, ///

The description of the cluster subnet group.

pub description: Option, ///

The status of the cluster subnet group. Possible values are Complete, Incomplete and Invalid.

pub subnet_group_status: Option, ///

A list of the VPC Subnet elements.

pub subnets: Option>, ///

The list of tags for the cluster subnet group.

pub tags: Option>, ///

The VPC ID of the cluster subnet group.

pub vpc_id: Option, } #[allow(dead_code)] struct ClusterSubnetGroupDeserializer; impl ClusterSubnetGroupDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterSubnetGroup, _>(tag_name, stack, |name, stack, obj| { match name { "ClusterSubnetGroupName" => { obj.cluster_subnet_group_name = Some(StringDeserializer::deserialize( "ClusterSubnetGroupName", stack, )?); } "Description" => { obj.description = Some(StringDeserializer::deserialize("Description", stack)?); } "SubnetGroupStatus" => { obj.subnet_group_status = Some(StringDeserializer::deserialize("SubnetGroupStatus", stack)?); } "Subnets" => { obj.subnets .get_or_insert(vec![]) .extend(SubnetListDeserializer::deserialize("Subnets", stack)?); } "Tags" => { obj.tags .get_or_insert(vec![]) .extend(TagListDeserializer::deserialize("Tags", stack)?); } "VpcId" => { obj.vpc_id = Some(StringDeserializer::deserialize("VpcId", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

Contains the output from the DescribeClusterSubnetGroups action.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterSubnetGroupMessage { ///

A list of ClusterSubnetGroup instances.

pub cluster_subnet_groups: Option>, ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, } #[allow(dead_code)] struct ClusterSubnetGroupMessageDeserializer; impl ClusterSubnetGroupMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterSubnetGroupMessage, _>( tag_name, stack, |name, stack, obj| { match name { "ClusterSubnetGroups" => { obj.cluster_subnet_groups.get_or_insert(vec![]).extend( ClusterSubnetGroupsDeserializer::deserialize( "ClusterSubnetGroups", stack, )?, ); } "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[allow(dead_code)] struct ClusterSubnetGroupsDeserializer; impl ClusterSubnetGroupsDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ClusterSubnetGroup" { obj.push(ClusterSubnetGroupDeserializer::deserialize( "ClusterSubnetGroup", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

Describes a cluster version, including the parameter group family and description of the version.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterVersion { ///

The name of the cluster parameter group family for the cluster.

pub cluster_parameter_group_family: Option, ///

The version number used by the cluster.

pub cluster_version: Option, ///

The description of the cluster version.

pub description: Option, } #[allow(dead_code)] struct ClusterVersionDeserializer; impl ClusterVersionDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterVersion, _>(tag_name, stack, |name, stack, obj| { match name { "ClusterParameterGroupFamily" => { obj.cluster_parameter_group_family = Some(StringDeserializer::deserialize( "ClusterParameterGroupFamily", stack, )?); } "ClusterVersion" => { obj.cluster_version = Some(StringDeserializer::deserialize("ClusterVersion", stack)?); } "Description" => { obj.description = Some(StringDeserializer::deserialize("Description", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct ClusterVersionListDeserializer; impl ClusterVersionListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ClusterVersion" { obj.push(ClusterVersionDeserializer::deserialize( "ClusterVersion", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

Contains the output from the DescribeClusterVersions action.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClusterVersionsMessage { ///

A list of Version elements.

pub cluster_versions: Option>, ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, } #[allow(dead_code)] struct ClusterVersionsMessageDeserializer; impl ClusterVersionsMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClusterVersionsMessage, _>(tag_name, stack, |name, stack, obj| { match name { "ClusterVersions" => { obj.cluster_versions.get_or_insert(vec![]).extend( ClusterVersionListDeserializer::deserialize("ClusterVersions", stack)?, ); } "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

Contains the output from the DescribeClusters action.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ClustersMessage { ///

A list of Cluster objects, where each object describes one cluster.

pub clusters: Option>, ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, } #[allow(dead_code)] struct ClustersMessageDeserializer; impl ClustersMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ClustersMessage, _>(tag_name, stack, |name, stack, obj| { match name { "Clusters" => { obj.clusters .get_or_insert(vec![]) .extend(ClusterListDeserializer::deserialize("Clusters", stack)?); } "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CopyClusterSnapshotMessage { ///

The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.

The value must be either -1 or an integer between 1 and 3,653.

The default value is -1.

pub manual_snapshot_retention_period: Option, ///

The identifier of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

Constraints:

  • Must be the identifier for a valid cluster.

pub source_snapshot_cluster_identifier: Option, ///

The identifier for the source snapshot.

Constraints:

  • Must be the identifier for a valid automated snapshot whose state is available.

pub source_snapshot_identifier: String, ///

The identifier given to the new manual snapshot.

Constraints:

  • Cannot be null, empty, or blank.

  • Must contain from 1 to 255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for the AWS account that is making the request.

pub target_snapshot_identifier: String, } /// Serialize `CopyClusterSnapshotMessage` contents to a `SignedRequest`. struct CopyClusterSnapshotMessageSerializer; impl CopyClusterSnapshotMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CopyClusterSnapshotMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.manual_snapshot_retention_period { params.put( &format!("{}{}", prefix, "ManualSnapshotRetentionPeriod"), &field_value, ); } if let Some(ref field_value) = obj.source_snapshot_cluster_identifier { params.put( &format!("{}{}", prefix, "SourceSnapshotClusterIdentifier"), &field_value, ); } params.put( &format!("{}{}", prefix, "SourceSnapshotIdentifier"), &obj.source_snapshot_identifier, ); params.put( &format!("{}{}", prefix, "TargetSnapshotIdentifier"), &obj.target_snapshot_identifier, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct CopyClusterSnapshotResult { pub snapshot: Option, } #[allow(dead_code)] struct CopyClusterSnapshotResultDeserializer; impl CopyClusterSnapshotResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, CopyClusterSnapshotResult, _>( tag_name, stack, |name, stack, obj| { match name { "Snapshot" => { obj.snapshot = Some(SnapshotDeserializer::deserialize("Snapshot", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CreateClusterMessage { ///

Reserved.

pub additional_info: Option, ///

If true, major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster.

When a new major version of the Amazon Redshift engine is released, you can request that the service automatically apply upgrades during the maintenance window to the Amazon Redshift engine that is running on your cluster.

Default: true

pub allow_version_upgrade: Option, ///

The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot.

Default: 1

Constraints: Must be a value from 0 to 35.

pub automated_snapshot_retention_period: Option, ///

The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency.

Default: A random, system-chosen Availability Zone in the region that is specified by the endpoint.

Example: us-east-2d

Constraint: The specified Availability Zone must be in the same region as the current endpoint.

pub availability_zone: Option, ///

A unique identifier for the cluster. You use this identifier to refer to the cluster for any subsequent cluster operations such as deleting or modifying. The identifier also appears in the Amazon Redshift console.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • Alphabetic characters must be lowercase.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for all clusters within an AWS account.

Example: myexamplecluster

pub cluster_identifier: String, ///

The name of the parameter group to be associated with this cluster.

Default: The default Amazon Redshift cluster parameter group. For information about the default parameter group, go to Working with Amazon Redshift Parameter Groups

Constraints:

  • Must be 1 to 255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

pub cluster_parameter_group_name: Option, ///

A list of security groups to be associated with this cluster.

Default: The default cluster security group for Amazon Redshift.

pub cluster_security_groups: Option>, ///

The name of a cluster subnet group to be associated with this cluster.

If this parameter is not provided the resulting cluster will be deployed outside virtual private cloud (VPC).

pub cluster_subnet_group_name: Option, ///

The type of the cluster. When cluster type is specified as

  • single-node, the NumberOfNodes parameter is not required.

  • multi-node, the NumberOfNodes parameter is required.

Valid Values: multi-node | single-node

Default: multi-node

pub cluster_type: Option, ///

The version of the Amazon Redshift engine software that you want to deploy on the cluster.

The version selected runs on all the nodes in the cluster.

Constraints: Only version 1.0 is currently available.

Example: 1.0

pub cluster_version: Option, ///

The name of the first database to be created when the cluster is created.

To create additional databases after the cluster is created, connect to the cluster with a SQL client and use SQL commands to create a database. For more information, go to Create a Database in the Amazon Redshift Database Developer Guide.

Default: dev

Constraints:

  • Must contain 1 to 64 alphanumeric characters.

  • Must contain only lowercase letters.

  • Cannot be a word that is reserved by the service. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.

pub db_name: Option, ///

The Elastic IP (EIP) address for the cluster.

Constraints: The cluster must be provisioned in EC2-VPC and publicly-accessible through an Internet gateway. For more information about provisioning clusters in EC2-VPC, go to Supported Platforms to Launch Your Cluster in the Amazon Redshift Cluster Management Guide.

pub elastic_ip: Option, ///

If true, the data in the cluster is encrypted at rest.

Default: false

pub encrypted: Option, ///

An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.

If this option is true, enhanced VPC routing is enabled.

Default: false

pub enhanced_vpc_routing: Option, ///

Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.

pub hsm_client_certificate_identifier: Option, ///

Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.

pub hsm_configuration_identifier: Option, ///

A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles in a single request.

A cluster can have up to 10 IAM roles associated with it at any time.

pub iam_roles: Option>, ///

The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster.

pub kms_key_id: Option, ///

An optional parameter for the name of the maintenance track for the cluster. If you don't provide a maintenance track name, the cluster is assigned to the current track.

pub maintenance_track_name: Option, ///

The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn't change the retention period of existing snapshots.

The value must be either -1 or an integer between 1 and 3,653.

pub manual_snapshot_retention_period: Option, ///

The password associated with the master user account for the cluster that is being created.

Constraints:

  • Must be between 8 and 64 characters in length.

  • Must contain at least one uppercase letter.

  • Must contain at least one lowercase letter.

  • Must contain one number.

  • Can be any printable ASCII character (ASCII code 33 to 126) except ' (single quote), " (double quote), \, /, @, or space.

pub master_user_password: String, ///

The user name associated with the master user account for the cluster that is being created.

Constraints:

  • Must be 1 - 128 alphanumeric characters. The user name can't be PUBLIC.

  • First character must be a letter.

  • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.

pub master_username: String, ///

The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide.

Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.4xlarge | ra3.16xlarge

pub node_type: String, ///

The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node.

For information about determining how many nodes you need, go to Working with Clusters in the Amazon Redshift Cluster Management Guide.

If you don't specify this parameter, you get a single-node cluster. When requesting a multi-node cluster, you must specify the number of nodes that you want in the cluster.

Default: 1

Constraints: Value must be at least 1 and no more than 100.

pub number_of_nodes: Option, ///

The port number on which the cluster accepts incoming connections.

The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.

Default: 5439

Valid Values: 1150-65535

pub port: Option, ///

The weekly time range (in UTC) during which automated cluster maintenance can occur.

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide.

Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

Constraints: Minimum 30-minute window.

pub preferred_maintenance_window: Option, ///

If true, the cluster can be accessed from a public network.

pub publicly_accessible: Option, ///

A unique identifier for the snapshot schedule.

pub snapshot_schedule_identifier: Option, ///

A list of tag instances.

pub tags: Option>, ///

A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster.

Default: The default VPC security group is associated with the cluster.

pub vpc_security_group_ids: Option>, } /// Serialize `CreateClusterMessage` contents to a `SignedRequest`. struct CreateClusterMessageSerializer; impl CreateClusterMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateClusterMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.additional_info { params.put(&format!("{}{}", prefix, "AdditionalInfo"), &field_value); } if let Some(ref field_value) = obj.allow_version_upgrade { params.put( &format!("{}{}", prefix, "AllowVersionUpgrade"), &field_value, ); } if let Some(ref field_value) = obj.automated_snapshot_retention_period { params.put( &format!("{}{}", prefix, "AutomatedSnapshotRetentionPeriod"), &field_value, ); } if let Some(ref field_value) = obj.availability_zone { params.put(&format!("{}{}", prefix, "AvailabilityZone"), &field_value); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); if let Some(ref field_value) = obj.cluster_parameter_group_name { params.put( &format!("{}{}", prefix, "ClusterParameterGroupName"), &field_value, ); } if let Some(ref field_value) = obj.cluster_security_groups { ClusterSecurityGroupNameListSerializer::serialize( params, &format!("{}{}", prefix, "ClusterSecurityGroupName"), field_value, ); } if let Some(ref field_value) = obj.cluster_subnet_group_name { params.put( &format!("{}{}", prefix, "ClusterSubnetGroupName"), &field_value, ); } if let Some(ref field_value) = obj.cluster_type { params.put(&format!("{}{}", prefix, "ClusterType"), &field_value); } if let Some(ref field_value) = obj.cluster_version { params.put(&format!("{}{}", prefix, "ClusterVersion"), &field_value); } if let Some(ref field_value) = obj.db_name { params.put(&format!("{}{}", prefix, "DBName"), &field_value); } if let Some(ref field_value) = obj.elastic_ip { params.put(&format!("{}{}", prefix, "ElasticIp"), &field_value); } if let Some(ref field_value) = obj.encrypted { 
params.put(&format!("{}{}", prefix, "Encrypted"), &field_value); } if let Some(ref field_value) = obj.enhanced_vpc_routing { params.put(&format!("{}{}", prefix, "EnhancedVpcRouting"), &field_value); } if let Some(ref field_value) = obj.hsm_client_certificate_identifier { params.put( &format!("{}{}", prefix, "HsmClientCertificateIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.hsm_configuration_identifier { params.put( &format!("{}{}", prefix, "HsmConfigurationIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.iam_roles { IamRoleArnListSerializer::serialize( params, &format!("{}{}", prefix, "IamRoleArn"), field_value, ); } if let Some(ref field_value) = obj.kms_key_id { params.put(&format!("{}{}", prefix, "KmsKeyId"), &field_value); } if let Some(ref field_value) = obj.maintenance_track_name { params.put( &format!("{}{}", prefix, "MaintenanceTrackName"), &field_value, ); } if let Some(ref field_value) = obj.manual_snapshot_retention_period { params.put( &format!("{}{}", prefix, "ManualSnapshotRetentionPeriod"), &field_value, ); } params.put( &format!("{}{}", prefix, "MasterUserPassword"), &obj.master_user_password, ); params.put( &format!("{}{}", prefix, "MasterUsername"), &obj.master_username, ); params.put(&format!("{}{}", prefix, "NodeType"), &obj.node_type); if let Some(ref field_value) = obj.number_of_nodes { params.put(&format!("{}{}", prefix, "NumberOfNodes"), &field_value); } if let Some(ref field_value) = obj.port { params.put(&format!("{}{}", prefix, "Port"), &field_value); } if let Some(ref field_value) = obj.preferred_maintenance_window { params.put( &format!("{}{}", prefix, "PreferredMaintenanceWindow"), &field_value, ); } if let Some(ref field_value) = obj.publicly_accessible { params.put(&format!("{}{}", prefix, "PubliclyAccessible"), &field_value); } if let Some(ref field_value) = obj.snapshot_schedule_identifier { params.put( &format!("{}{}", prefix, "SnapshotScheduleIdentifier"), &field_value, ); } if let 
Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } if let Some(ref field_value) = obj.vpc_security_group_ids { VpcSecurityGroupIdListSerializer::serialize( params, &format!("{}{}", prefix, "VpcSecurityGroupId"), field_value, ); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CreateClusterParameterGroupMessage { ///

A description of the parameter group.

pub description: String, ///

The Amazon Redshift engine version to which the cluster parameter group applies. The cluster engine version determines the set of parameters.

To get a list of valid parameter group family names, you can call DescribeClusterParameterGroups. By default, Amazon Redshift returns a list of all the parameter groups that are owned by your AWS account, including the default parameter groups for each Amazon Redshift engine version. The parameter group family names associated with the default parameter groups provide you the valid values. For example, a valid family name is "redshift-1.0".

pub parameter_group_family: String, ///

The name of the cluster parameter group.

Constraints:

  • Must be 1 to 255 alphanumeric characters or hyphens

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique withing your AWS account.

This value is stored as a lower-case string.

pub parameter_group_name: String, ///

A list of tag instances.

pub tags: Option>, } /// Serialize `CreateClusterParameterGroupMessage` contents to a `SignedRequest`. struct CreateClusterParameterGroupMessageSerializer; impl CreateClusterParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateClusterParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put(&format!("{}{}", prefix, "Description"), &obj.description); params.put( &format!("{}{}", prefix, "ParameterGroupFamily"), &obj.parameter_group_family, ); params.put( &format!("{}{}", prefix, "ParameterGroupName"), &obj.parameter_group_name, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct CreateClusterParameterGroupResult { pub cluster_parameter_group: Option, } #[allow(dead_code)] struct CreateClusterParameterGroupResultDeserializer; impl CreateClusterParameterGroupResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, CreateClusterParameterGroupResult, _>( tag_name, stack, |name, stack, obj| { match name { "ClusterParameterGroup" => { obj.cluster_parameter_group = Some(ClusterParameterGroupDeserializer::deserialize( "ClusterParameterGroup", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct CreateClusterResult { pub cluster: Option, } #[allow(dead_code)] struct CreateClusterResultDeserializer; impl CreateClusterResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, CreateClusterResult, _>(tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = 
Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CreateClusterSecurityGroupMessage { ///

The name for the security group. Amazon Redshift stores the value as a lowercase string.

Constraints:

  • Must contain no more than 255 alphanumeric characters or hyphens.

  • Must not be "Default".

  • Must be unique for all security groups that are created by your AWS account.

Example: examplesecuritygroup

pub cluster_security_group_name: String, ///

A description for the security group.

pub description: String, ///

A list of tag instances.

pub tags: Option>, } /// Serialize `CreateClusterSecurityGroupMessage` contents to a `SignedRequest`. struct CreateClusterSecurityGroupMessageSerializer; impl CreateClusterSecurityGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateClusterSecurityGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterSecurityGroupName"), &obj.cluster_security_group_name, ); params.put(&format!("{}{}", prefix, "Description"), &obj.description); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct CreateClusterSecurityGroupResult { pub cluster_security_group: Option, } #[allow(dead_code)] struct CreateClusterSecurityGroupResultDeserializer; impl CreateClusterSecurityGroupResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, CreateClusterSecurityGroupResult, _>( tag_name, stack, |name, stack, obj| { match name { "ClusterSecurityGroup" => { obj.cluster_security_group = Some(ClusterSecurityGroupDeserializer::deserialize( "ClusterSecurityGroup", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CreateClusterSnapshotMessage { ///

The cluster identifier for which you want a snapshot.

pub cluster_identifier: String, ///

The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.

The value must be either -1 or an integer between 1 and 3,653.

The default value is -1.

pub manual_snapshot_retention_period: Option, ///

A unique identifier for the snapshot that you are requesting. This identifier must be unique for all snapshots within the AWS account.

Constraints:

  • Cannot be null, empty, or blank

  • Must contain from 1 to 255 alphanumeric characters or hyphens

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

Example: my-snapshot-id

pub snapshot_identifier: String, ///

A list of tag instances.

pub tags: Option>, } /// Serialize `CreateClusterSnapshotMessage` contents to a `SignedRequest`. struct CreateClusterSnapshotMessageSerializer; impl CreateClusterSnapshotMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateClusterSnapshotMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); if let Some(ref field_value) = obj.manual_snapshot_retention_period { params.put( &format!("{}{}", prefix, "ManualSnapshotRetentionPeriod"), &field_value, ); } params.put( &format!("{}{}", prefix, "SnapshotIdentifier"), &obj.snapshot_identifier, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct CreateClusterSnapshotResult { pub snapshot: Option, } #[allow(dead_code)] struct CreateClusterSnapshotResultDeserializer; impl CreateClusterSnapshotResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, CreateClusterSnapshotResult, _>( tag_name, stack, |name, stack, obj| { match name { "Snapshot" => { obj.snapshot = Some(SnapshotDeserializer::deserialize("Snapshot", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CreateClusterSubnetGroupMessage { ///

The name for the subnet group. Amazon Redshift stores the value as a lowercase string.

Constraints:

  • Must contain no more than 255 alphanumeric characters or hyphens.

  • Must not be "Default".

  • Must be unique for all subnet groups that are created by your AWS account.

Example: examplesubnetgroup

pub cluster_subnet_group_name: String, ///

A description for the subnet group.

pub description: String, ///

An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a single request.

pub subnet_ids: Vec, ///

A list of tag instances.

pub tags: Option>, } /// Serialize `CreateClusterSubnetGroupMessage` contents to a `SignedRequest`. struct CreateClusterSubnetGroupMessageSerializer; impl CreateClusterSubnetGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateClusterSubnetGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterSubnetGroupName"), &obj.cluster_subnet_group_name, ); params.put(&format!("{}{}", prefix, "Description"), &obj.description); SubnetIdentifierListSerializer::serialize( params, &format!("{}{}", prefix, "SubnetIdentifier"), &obj.subnet_ids, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct CreateClusterSubnetGroupResult { pub cluster_subnet_group: Option, } #[allow(dead_code)] struct CreateClusterSubnetGroupResultDeserializer; impl CreateClusterSubnetGroupResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, CreateClusterSubnetGroupResult, _>( tag_name, stack, |name, stack, obj| { match name { "ClusterSubnetGroup" => { obj.cluster_subnet_group = Some(ClusterSubnetGroupDeserializer::deserialize( "ClusterSubnetGroup", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CreateEventSubscriptionMessage { ///

A boolean value; set to true to activate the subscription, and set to false to create the subscription but not activate it.

pub enabled: Option, ///

Specifies the Amazon Redshift event categories to be published by the event notification subscription.

Values: configuration, management, monitoring, security

pub event_categories: Option>, ///

Specifies the Amazon Redshift event severity to be published by the event notification subscription.

Values: ERROR, INFO

pub severity: Option, ///

The Amazon Resource Name (ARN) of the Amazon SNS topic used to transmit the event notifications. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

pub sns_topic_arn: String, ///

A list of one or more identifiers of Amazon Redshift source objects. All of the objects must be of the same type as was specified in the source type parameter. The event subscription will return only events generated by the specified objects. If not specified, then events are returned for all objects within the source type specified.

Example: my-cluster-1, my-cluster-2

Example: my-snapshot-20131010

pub source_ids: Option>, ///

The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.

Valid values: cluster, cluster-parameter-group, cluster-security-group, cluster-snapshot, and scheduled-action.

pub source_type: Option, ///

The name of the event subscription to be created.

Constraints:

  • Cannot be null, empty, or blank.

  • Must contain from 1 to 255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

pub subscription_name: String, ///

A list of tag instances.

pub tags: Option>, } /// Serialize `CreateEventSubscriptionMessage` contents to a `SignedRequest`. struct CreateEventSubscriptionMessageSerializer; impl CreateEventSubscriptionMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateEventSubscriptionMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.enabled { params.put(&format!("{}{}", prefix, "Enabled"), &field_value); } if let Some(ref field_value) = obj.event_categories { EventCategoriesListSerializer::serialize( params, &format!("{}{}", prefix, "EventCategory"), field_value, ); } if let Some(ref field_value) = obj.severity { params.put(&format!("{}{}", prefix, "Severity"), &field_value); } params.put(&format!("{}{}", prefix, "SnsTopicArn"), &obj.sns_topic_arn); if let Some(ref field_value) = obj.source_ids { SourceIdsListSerializer::serialize( params, &format!("{}{}", prefix, "SourceId"), field_value, ); } if let Some(ref field_value) = obj.source_type { params.put(&format!("{}{}", prefix, "SourceType"), &field_value); } params.put( &format!("{}{}", prefix, "SubscriptionName"), &obj.subscription_name, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct CreateEventSubscriptionResult { pub event_subscription: Option, } #[allow(dead_code)] struct CreateEventSubscriptionResultDeserializer; impl CreateEventSubscriptionResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, CreateEventSubscriptionResult, _>( tag_name, stack, |name, stack, obj| { match name { "EventSubscription" => { obj.event_subscription = Some(EventSubscriptionDeserializer::deserialize( "EventSubscription", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CreateHsmClientCertificateMessage { ///

The identifier to be assigned to the new HSM client certificate that the cluster will use to connect to the HSM to use the database encryption keys.

pub hsm_client_certificate_identifier: String, ///

A list of tag instances.

pub tags: Option>, } /// Serialize `CreateHsmClientCertificateMessage` contents to a `SignedRequest`. struct CreateHsmClientCertificateMessageSerializer; impl CreateHsmClientCertificateMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateHsmClientCertificateMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "HsmClientCertificateIdentifier"), &obj.hsm_client_certificate_identifier, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct CreateHsmClientCertificateResult { pub hsm_client_certificate: Option, } #[allow(dead_code)] struct CreateHsmClientCertificateResultDeserializer; impl CreateHsmClientCertificateResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, CreateHsmClientCertificateResult, _>( tag_name, stack, |name, stack, obj| { match name { "HsmClientCertificate" => { obj.hsm_client_certificate = Some(HsmClientCertificateDeserializer::deserialize( "HsmClientCertificate", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CreateHsmConfigurationMessage { ///

A text description of the HSM configuration to be created.

pub description: String, ///

The identifier to be assigned to the new Amazon Redshift HSM configuration.

pub hsm_configuration_identifier: String, ///

The IP address that the Amazon Redshift cluster must use to access the HSM.

pub hsm_ip_address: String, ///

The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.

pub hsm_partition_name: String, ///

The password required to access the HSM partition.

pub hsm_partition_password: String, ///

The HSMs public certificate file. When using Cloud HSM, the file name is server.pem.

pub hsm_server_public_certificate: String, ///

A list of tag instances.

pub tags: Option>, } /// Serialize `CreateHsmConfigurationMessage` contents to a `SignedRequest`. struct CreateHsmConfigurationMessageSerializer; impl CreateHsmConfigurationMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateHsmConfigurationMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put(&format!("{}{}", prefix, "Description"), &obj.description); params.put( &format!("{}{}", prefix, "HsmConfigurationIdentifier"), &obj.hsm_configuration_identifier, ); params.put( &format!("{}{}", prefix, "HsmIpAddress"), &obj.hsm_ip_address, ); params.put( &format!("{}{}", prefix, "HsmPartitionName"), &obj.hsm_partition_name, ); params.put( &format!("{}{}", prefix, "HsmPartitionPassword"), &obj.hsm_partition_password, ); params.put( &format!("{}{}", prefix, "HsmServerPublicCertificate"), &obj.hsm_server_public_certificate, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct CreateHsmConfigurationResult { pub hsm_configuration: Option, } #[allow(dead_code)] struct CreateHsmConfigurationResultDeserializer; impl CreateHsmConfigurationResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, CreateHsmConfigurationResult, _>( tag_name, stack, |name, stack, obj| { match name { "HsmConfiguration" => { obj.hsm_configuration = Some(HsmConfigurationDeserializer::deserialize( "HsmConfiguration", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CreateScheduledActionMessage { ///

If true, the schedule is enabled. If false, the scheduled action does not trigger. For more information about state of the scheduled action, see ScheduledAction.

pub enable: Option, ///

The end time in UTC of the scheduled action. After this time, the scheduled action does not trigger. For more information about this parameter, see ScheduledAction.

pub end_time: Option, ///

The IAM role to assume to run the target action. For more information about this parameter, see ScheduledAction.

pub iam_role: String, ///

The schedule in at( ) or cron( ) format. For more information about this parameter, see ScheduledAction.

pub schedule: String, ///

The description of the scheduled action.

pub scheduled_action_description: Option, ///

The name of the scheduled action. The name must be unique within an account. For more information about this parameter, see ScheduledAction.

pub scheduled_action_name: String, ///

The start time in UTC of the scheduled action. Before this time, the scheduled action does not trigger. For more information about this parameter, see ScheduledAction.

pub start_time: Option, ///

A JSON format string of the Amazon Redshift API operation with input parameters. For more information about this parameter, see ScheduledAction.

pub target_action: ScheduledActionType, } /// Serialize `CreateScheduledActionMessage` contents to a `SignedRequest`. struct CreateScheduledActionMessageSerializer; impl CreateScheduledActionMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateScheduledActionMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.enable { params.put(&format!("{}{}", prefix, "Enable"), &field_value); } if let Some(ref field_value) = obj.end_time { params.put(&format!("{}{}", prefix, "EndTime"), &field_value); } params.put(&format!("{}{}", prefix, "IamRole"), &obj.iam_role); params.put(&format!("{}{}", prefix, "Schedule"), &obj.schedule); if let Some(ref field_value) = obj.scheduled_action_description { params.put( &format!("{}{}", prefix, "ScheduledActionDescription"), &field_value, ); } params.put( &format!("{}{}", prefix, "ScheduledActionName"), &obj.scheduled_action_name, ); if let Some(ref field_value) = obj.start_time { params.put(&format!("{}{}", prefix, "StartTime"), &field_value); } ScheduledActionTypeSerializer::serialize( params, &format!("{}{}", prefix, "TargetAction"), &obj.target_action, ); } } ///

The result of the CreateSnapshotCopyGrant action.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CreateSnapshotCopyGrantMessage { ///

The unique identifier of the customer master key (CMK) to which to grant Amazon Redshift permission. If no key is specified, the default key is used.

pub kms_key_id: Option, ///

The name of the snapshot copy grant. This name must be unique in the region for the AWS account.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • Alphabetic characters must be lowercase.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for all clusters within an AWS account.

pub snapshot_copy_grant_name: String, ///

A list of tag instances.

pub tags: Option>, } /// Serialize `CreateSnapshotCopyGrantMessage` contents to a `SignedRequest`. struct CreateSnapshotCopyGrantMessageSerializer; impl CreateSnapshotCopyGrantMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateSnapshotCopyGrantMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.kms_key_id { params.put(&format!("{}{}", prefix, "KmsKeyId"), &field_value); } params.put( &format!("{}{}", prefix, "SnapshotCopyGrantName"), &obj.snapshot_copy_grant_name, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct CreateSnapshotCopyGrantResult { pub snapshot_copy_grant: Option, } #[allow(dead_code)] struct CreateSnapshotCopyGrantResultDeserializer; impl CreateSnapshotCopyGrantResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, CreateSnapshotCopyGrantResult, _>( tag_name, stack, |name, stack, obj| { match name { "SnapshotCopyGrant" => { obj.snapshot_copy_grant = Some(SnapshotCopyGrantDeserializer::deserialize( "SnapshotCopyGrant", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CreateSnapshotScheduleMessage { ///

pub dry_run: Option, ///

pub next_invocations: Option, ///

The definition of the snapshot schedule. The definition is made up of schedule expressions, for example "cron(30 12 *)" or "rate(12 hours)".

pub schedule_definitions: Option>, ///

The description of the snapshot schedule.

pub schedule_description: Option, ///

A unique identifier for a snapshot schedule. Only alphanumeric characters are allowed for the identifier.

pub schedule_identifier: Option, ///

An optional set of tags you can use to search for the schedule.

pub tags: Option>, } /// Serialize `CreateSnapshotScheduleMessage` contents to a `SignedRequest`. struct CreateSnapshotScheduleMessageSerializer; impl CreateSnapshotScheduleMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateSnapshotScheduleMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.dry_run { params.put(&format!("{}{}", prefix, "DryRun"), &field_value); } if let Some(ref field_value) = obj.next_invocations { params.put(&format!("{}{}", prefix, "NextInvocations"), &field_value); } if let Some(ref field_value) = obj.schedule_definitions { ScheduleDefinitionListSerializer::serialize( params, &format!("{}{}", prefix, "ScheduleDefinition"), field_value, ); } if let Some(ref field_value) = obj.schedule_description { params.put( &format!("{}{}", prefix, "ScheduleDescription"), &field_value, ); } if let Some(ref field_value) = obj.schedule_identifier { params.put(&format!("{}{}", prefix, "ScheduleIdentifier"), &field_value); } if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } ///

Contains the output from the CreateTags action.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CreateTagsMessage { ///

The Amazon Resource Name (ARN) to which you want to add the tag or tags. For example, arn:aws:redshift:us-east-2:123456789:cluster:t1.

pub resource_name: String, ///

One or more name/value pairs to add as tags to the specified resource. Each tag name is passed in with the parameter Key and the corresponding value is passed in with the parameter Value. The Key and Value parameters are separated by a comma (,). Separate multiple tags with a space. For example, --tags "Key"="owner","Value"="admin" "Key"="environment","Value"="test" "Key"="version","Value"="1.0".

pub tags: Vec, } /// Serialize `CreateTagsMessage` contents to a `SignedRequest`. struct CreateTagsMessageSerializer; impl CreateTagsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateTagsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put(&format!("{}{}", prefix, "ResourceName"), &obj.resource_name); TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), &obj.tags); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct CreateUsageLimitMessage { ///

The limit amount. If time-based, this amount is in minutes. If data-based, this amount is in terabytes (TB). The value must be a positive number.

pub amount: i64, ///

The action that Amazon Redshift takes when the limit is reached. The default is log. For more information about this parameter, see UsageLimit.

pub breach_action: Option, ///

The identifier of the cluster that you want to limit usage.

pub cluster_identifier: String, ///

The Amazon Redshift feature that you want to limit.

pub feature_type: String, ///

The type of limit. Depending on the feature type, this can be based on a time duration or data size. If FeatureType is spectrum, then LimitType must be data-scanned. If FeatureType is concurrency-scaling, then LimitType must be time.

pub limit_type: String, ///

The time period that the amount applies to. A weekly period begins on Sunday. The default is monthly.

pub period: Option, ///

A list of tag instances.

pub tags: Option>, } /// Serialize `CreateUsageLimitMessage` contents to a `SignedRequest`. struct CreateUsageLimitMessageSerializer; impl CreateUsageLimitMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateUsageLimitMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put(&format!("{}{}", prefix, "Amount"), &obj.amount); if let Some(ref field_value) = obj.breach_action { params.put(&format!("{}{}", prefix, "BreachAction"), &field_value); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); params.put(&format!("{}{}", prefix, "FeatureType"), &obj.feature_type); params.put(&format!("{}{}", prefix, "LimitType"), &obj.limit_type); if let Some(ref field_value) = obj.period { params.put(&format!("{}{}", prefix, "Period"), &field_value); } if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct CustomerStorageMessage { ///

The total amount of storage currently used for snapshots.

pub total_backup_size_in_mega_bytes: Option, ///

The total amount of storage currently provisioned.

pub total_provisioned_storage_in_mega_bytes: Option, } #[allow(dead_code)] struct CustomerStorageMessageDeserializer; impl CustomerStorageMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, CustomerStorageMessage, _>(tag_name, stack, |name, stack, obj| { match name { "TotalBackupSizeInMegaBytes" => { obj.total_backup_size_in_mega_bytes = Some(DoubleDeserializer::deserialize( "TotalBackupSizeInMegaBytes", stack, )?); } "TotalProvisionedStorageInMegaBytes" => { obj.total_provisioned_storage_in_mega_bytes = Some(DoubleDeserializer::deserialize( "TotalProvisionedStorageInMegaBytes", stack, )?); } _ => skip_tree(stack), } Ok(()) }) } } ///

Describes the status of a cluster while it is in the process of resizing with an incremental resize.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct DataTransferProgress { ///

Describes the data transfer rate in MB's per second.

pub current_rate_in_mega_bytes_per_second: Option, ///

Describes the total amount of data that has been transfered in MB's.

pub data_transferred_in_mega_bytes: Option, ///

Describes the number of seconds that have elapsed during the data transfer.

pub elapsed_time_in_seconds: Option, ///

Describes the estimated number of seconds remaining to complete the transfer.

pub estimated_time_to_completion_in_seconds: Option, ///

Describes the status of the cluster. While the transfer is in progress the status is transferringdata.

pub status: Option, ///

Describes the total amount of data to be transfered in megabytes.

pub total_data_in_mega_bytes: Option, } #[allow(dead_code)] struct DataTransferProgressDeserializer; impl DataTransferProgressDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, DataTransferProgress, _>(tag_name, stack, |name, stack, obj| { match name { "CurrentRateInMegaBytesPerSecond" => { obj.current_rate_in_mega_bytes_per_second = Some(DoubleOptionalDeserializer::deserialize( "CurrentRateInMegaBytesPerSecond", stack, )?); } "DataTransferredInMegaBytes" => { obj.data_transferred_in_mega_bytes = Some(LongDeserializer::deserialize( "DataTransferredInMegaBytes", stack, )?); } "ElapsedTimeInSeconds" => { obj.elapsed_time_in_seconds = Some(LongOptionalDeserializer::deserialize( "ElapsedTimeInSeconds", stack, )?); } "EstimatedTimeToCompletionInSeconds" => { obj.estimated_time_to_completion_in_seconds = Some(LongOptionalDeserializer::deserialize( "EstimatedTimeToCompletionInSeconds", stack, )?); } "Status" => { obj.status = Some(StringDeserializer::deserialize("Status", stack)?); } "TotalDataInMegaBytes" => { obj.total_data_in_mega_bytes = Some(LongDeserializer::deserialize( "TotalDataInMegaBytes", stack, )?); } _ => skip_tree(stack), } Ok(()) }) } } /// Serialize `DbGroupList` contents to a `SignedRequest`. struct DbGroupListSerializer; impl DbGroupListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } ///

Describes the default cluster parameters for a parameter group family.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct DefaultClusterParameters { ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, ///

The name of the cluster parameter group family to which the engine default parameters apply.

pub parameter_group_family: Option, ///

The list of cluster default parameters.

pub parameters: Option>, } #[allow(dead_code)] struct DefaultClusterParametersDeserializer; impl DefaultClusterParametersDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, DefaultClusterParameters, _>( tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "ParameterGroupFamily" => { obj.parameter_group_family = Some(StringDeserializer::deserialize( "ParameterGroupFamily", stack, )?); } "Parameters" => { obj.parameters.get_or_insert(vec![]).extend( ParametersListDeserializer::deserialize("Parameters", stack)?, ); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

Describes a deferred maintenance window

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct DeferredMaintenanceWindow { ///

A timestamp for the end of the time period when we defer maintenance.

pub defer_maintenance_end_time: Option, ///

A unique identifier for the maintenance window.

pub defer_maintenance_identifier: Option, ///

A timestamp for the beginning of the time period when we defer maintenance.

pub defer_maintenance_start_time: Option, } #[allow(dead_code)] struct DeferredMaintenanceWindowDeserializer; impl DeferredMaintenanceWindowDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, DeferredMaintenanceWindow, _>( tag_name, stack, |name, stack, obj| { match name { "DeferMaintenanceEndTime" => { obj.defer_maintenance_end_time = Some(TStampDeserializer::deserialize( "DeferMaintenanceEndTime", stack, )?); } "DeferMaintenanceIdentifier" => { obj.defer_maintenance_identifier = Some(StringDeserializer::deserialize( "DeferMaintenanceIdentifier", stack, )?); } "DeferMaintenanceStartTime" => { obj.defer_maintenance_start_time = Some(TStampDeserializer::deserialize( "DeferMaintenanceStartTime", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[allow(dead_code)] struct DeferredMaintenanceWindowsListDeserializer; impl DeferredMaintenanceWindowsListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "DeferredMaintenanceWindow" { obj.push(DeferredMaintenanceWindowDeserializer::deserialize( "DeferredMaintenanceWindow", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DeleteClusterMessage { ///

The identifier of the cluster to be deleted.

Constraints:

  • Must contain lowercase characters.

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

pub cluster_identifier: String, ///

The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, SkipFinalClusterSnapshot must be false.

Constraints:

  • Must be 1 to 255 alphanumeric characters.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

pub final_cluster_snapshot_identifier: Option, ///

The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.

The value must be either -1 or an integer between 1 and 3,653.

The default value is -1.

pub final_cluster_snapshot_retention_period: Option, ///

Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true, a final cluster snapshot is not created. If false, a final cluster snapshot is created before the cluster is deleted.

The FinalClusterSnapshotIdentifier parameter must be specified if SkipFinalClusterSnapshot is false.

Default: false

pub skip_final_cluster_snapshot: Option, } /// Serialize `DeleteClusterMessage` contents to a `SignedRequest`. struct DeleteClusterMessageSerializer; impl DeleteClusterMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteClusterMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); if let Some(ref field_value) = obj.final_cluster_snapshot_identifier { params.put( &format!("{}{}", prefix, "FinalClusterSnapshotIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.final_cluster_snapshot_retention_period { params.put( &format!("{}{}", prefix, "FinalClusterSnapshotRetentionPeriod"), &field_value, ); } if let Some(ref field_value) = obj.skip_final_cluster_snapshot { params.put( &format!("{}{}", prefix, "SkipFinalClusterSnapshot"), &field_value, ); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DeleteClusterParameterGroupMessage { ///

The name of the parameter group to be deleted.

Constraints:

  • Must be the name of an existing cluster parameter group.

  • Cannot delete a default cluster parameter group.

pub parameter_group_name: String, } /// Serialize `DeleteClusterParameterGroupMessage` contents to a `SignedRequest`. struct DeleteClusterParameterGroupMessageSerializer; impl DeleteClusterParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteClusterParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ParameterGroupName"), &obj.parameter_group_name, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct DeleteClusterResult { pub cluster: Option, } #[allow(dead_code)] struct DeleteClusterResultDeserializer; impl DeleteClusterResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, DeleteClusterResult, _>(tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DeleteClusterSecurityGroupMessage { ///

The name of the cluster security group to be deleted.

pub cluster_security_group_name: String, } /// Serialize `DeleteClusterSecurityGroupMessage` contents to a `SignedRequest`. struct DeleteClusterSecurityGroupMessageSerializer; impl DeleteClusterSecurityGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteClusterSecurityGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterSecurityGroupName"), &obj.cluster_security_group_name, ); } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DeleteClusterSnapshotMessage { ///

The unique identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

Constraints: Must be the name of valid cluster.

pub snapshot_cluster_identifier: Option, ///

The unique identifier of the manual snapshot to be deleted.

Constraints: Must be the name of an existing snapshot that is in the available, failed, or cancelled state.

pub snapshot_identifier: String, } /// Serialize `DeleteClusterSnapshotMessage` contents to a `SignedRequest`. struct DeleteClusterSnapshotMessageSerializer; impl DeleteClusterSnapshotMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteClusterSnapshotMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.snapshot_cluster_identifier { params.put( &format!("{}{}", prefix, "SnapshotClusterIdentifier"), &field_value, ); } params.put( &format!("{}{}", prefix, "SnapshotIdentifier"), &obj.snapshot_identifier, ); } } /// Serialize `DeleteClusterSnapshotMessageList` contents to a `SignedRequest`. struct DeleteClusterSnapshotMessageListSerializer; impl DeleteClusterSnapshotMessageListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); DeleteClusterSnapshotMessageSerializer::serialize(params, &key, obj); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct DeleteClusterSnapshotResult { pub snapshot: Option, } #[allow(dead_code)] struct DeleteClusterSnapshotResultDeserializer; impl DeleteClusterSnapshotResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, DeleteClusterSnapshotResult, _>( tag_name, stack, |name, stack, obj| { match name { "Snapshot" => { obj.snapshot = Some(SnapshotDeserializer::deserialize("Snapshot", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DeleteClusterSubnetGroupMessage { ///

The name of the cluster subnet group name to be deleted.

pub cluster_subnet_group_name: String, } /// Serialize `DeleteClusterSubnetGroupMessage` contents to a `SignedRequest`. struct DeleteClusterSubnetGroupMessageSerializer; impl DeleteClusterSubnetGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteClusterSubnetGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterSubnetGroupName"), &obj.cluster_subnet_group_name, ); } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DeleteEventSubscriptionMessage { ///

The name of the Amazon Redshift event notification subscription to be deleted.

pub subscription_name: String, } /// Serialize `DeleteEventSubscriptionMessage` contents to a `SignedRequest`. struct DeleteEventSubscriptionMessageSerializer; impl DeleteEventSubscriptionMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteEventSubscriptionMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "SubscriptionName"), &obj.subscription_name, ); } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DeleteHsmClientCertificateMessage { ///

The identifier of the HSM client certificate to be deleted.

pub hsm_client_certificate_identifier: String, } /// Serialize `DeleteHsmClientCertificateMessage` contents to a `SignedRequest`. struct DeleteHsmClientCertificateMessageSerializer; impl DeleteHsmClientCertificateMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteHsmClientCertificateMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "HsmClientCertificateIdentifier"), &obj.hsm_client_certificate_identifier, ); } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DeleteHsmConfigurationMessage { ///

The identifier of the Amazon Redshift HSM configuration to be deleted.

pub hsm_configuration_identifier: String, } /// Serialize `DeleteHsmConfigurationMessage` contents to a `SignedRequest`. struct DeleteHsmConfigurationMessageSerializer; impl DeleteHsmConfigurationMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteHsmConfigurationMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "HsmConfigurationIdentifier"), &obj.hsm_configuration_identifier, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DeleteScheduledActionMessage { ///

The name of the scheduled action to delete.

pub scheduled_action_name: String, } /// Serialize `DeleteScheduledActionMessage` contents to a `SignedRequest`. struct DeleteScheduledActionMessageSerializer; impl DeleteScheduledActionMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteScheduledActionMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ScheduledActionName"), &obj.scheduled_action_name, ); } } ///

The result of the DeleteSnapshotCopyGrant action.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DeleteSnapshotCopyGrantMessage { ///

The name of the snapshot copy grant to delete.

pub snapshot_copy_grant_name: String, } /// Serialize `DeleteSnapshotCopyGrantMessage` contents to a `SignedRequest`. struct DeleteSnapshotCopyGrantMessageSerializer; impl DeleteSnapshotCopyGrantMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteSnapshotCopyGrantMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "SnapshotCopyGrantName"), &obj.snapshot_copy_grant_name, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DeleteSnapshotScheduleMessage { ///

A unique identifier of the snapshot schedule to delete.

pub schedule_identifier: String, } /// Serialize `DeleteSnapshotScheduleMessage` contents to a `SignedRequest`. struct DeleteSnapshotScheduleMessageSerializer; impl DeleteSnapshotScheduleMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteSnapshotScheduleMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ScheduleIdentifier"), &obj.schedule_identifier, ); } } ///

Contains the output from the DeleteTags action.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DeleteTagsMessage { ///

The Amazon Resource Name (ARN) from which you want to remove the tag or tags. For example, arn:aws:redshift:us-east-2:123456789:cluster:t1.

pub resource_name: String, ///

The tag key that you want to delete.

pub tag_keys: Vec, } /// Serialize `DeleteTagsMessage` contents to a `SignedRequest`. struct DeleteTagsMessageSerializer; impl DeleteTagsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteTagsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put(&format!("{}{}", prefix, "ResourceName"), &obj.resource_name); TagKeyListSerializer::serialize(params, &format!("{}{}", prefix, "TagKey"), &obj.tag_keys); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DeleteUsageLimitMessage { ///

The identifier of the usage limit to delete.

pub usage_limit_id: String, } /// Serialize `DeleteUsageLimitMessage` contents to a `SignedRequest`. struct DeleteUsageLimitMessageSerializer; impl DeleteUsageLimitMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteUsageLimitMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "UsageLimitId"), &obj.usage_limit_id, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeAccountAttributesMessage { ///

A list of attribute names.

pub attribute_names: Option>, } /// Serialize `DescribeAccountAttributesMessage` contents to a `SignedRequest`. struct DescribeAccountAttributesMessageSerializer; impl DescribeAccountAttributesMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeAccountAttributesMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.attribute_names { AttributeNameListSerializer::serialize( params, &format!("{}{}", prefix, "AttributeName"), field_value, ); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeClusterDbRevisionsMessage { ///

A unique identifier for a cluster whose ClusterDbRevisions you are requesting. This parameter is case sensitive. All clusters defined for an account are returned by default.

pub cluster_identifier: Option, ///

An optional parameter that specifies the starting point for returning a set of response records. When the results of a DescribeClusterDbRevisions request exceed the value specified in MaxRecords, Amazon Redshift returns a value in the marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the marker parameter and retrying the request.

Constraints: You can specify either the ClusterIdentifier parameter, or the marker parameter, but not both.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in the marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the marker parameter and retrying the request.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, } /// Serialize `DescribeClusterDbRevisionsMessage` contents to a `SignedRequest`. struct DescribeClusterDbRevisionsMessageSerializer; impl DescribeClusterDbRevisionsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeClusterDbRevisionsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.cluster_identifier { params.put(&format!("{}{}", prefix, "ClusterIdentifier"), &field_value); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeClusterParameterGroupsMessage { ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterParameterGroups request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

The name of a specific parameter group for which to return details. By default, details about all parameter groups and the default parameter group are returned.

pub parameter_group_name: Option, ///

A tag key or keys for which you want to return all matching cluster parameter groups that are associated with the specified key or keys. For example, suppose that you have parameter groups that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the parameter groups that have either or both of these tag keys associated with them.

pub tag_keys: Option>, ///

A tag value or values for which you want to return all matching cluster parameter groups that are associated with the specified tag value or values. For example, suppose that you have parameter groups that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the parameter groups that have either or both of these tag values associated with them.

pub tag_values: Option>, } /// Serialize `DescribeClusterParameterGroupsMessage` contents to a `SignedRequest`. struct DescribeClusterParameterGroupsMessageSerializer; impl DescribeClusterParameterGroupsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeClusterParameterGroupsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.parameter_group_name { params.put(&format!("{}{}", prefix, "ParameterGroupName"), &field_value); } if let Some(ref field_value) = obj.tag_keys { TagKeyListSerializer::serialize( params, &format!("{}{}", prefix, "TagKey"), field_value, ); } if let Some(ref field_value) = obj.tag_values { TagValueListSerializer::serialize( params, &format!("{}{}", prefix, "TagValue"), field_value, ); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeClusterParametersMessage { ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterParameters request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

The name of a cluster parameter group for which to return details.

pub parameter_group_name: String, ///

The parameter types to return. Specify user to show parameters that are different form the default. Similarly, specify engine-default to show parameters that are the same as the default parameter group.

Default: All parameter types returned.

Valid Values: user | engine-default

pub source: Option, } /// Serialize `DescribeClusterParametersMessage` contents to a `SignedRequest`. struct DescribeClusterParametersMessageSerializer; impl DescribeClusterParametersMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeClusterParametersMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } params.put( &format!("{}{}", prefix, "ParameterGroupName"), &obj.parameter_group_name, ); if let Some(ref field_value) = obj.source { params.put(&format!("{}{}", prefix, "Source"), &field_value); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeClusterSecurityGroupsMessage { ///

The name of a cluster security group for which you are requesting details. You can specify either the Marker parameter or a ClusterSecurityGroupName parameter, but not both.

Example: securitygroup1

pub cluster_security_group_name: Option, ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSecurityGroups request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

Constraints: You can specify either the ClusterSecurityGroupName parameter or the Marker parameter, but not both.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

A tag key or keys for which you want to return all matching cluster security groups that are associated with the specified key or keys. For example, suppose that you have security groups that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the security groups that have either or both of these tag keys associated with them.

pub tag_keys: Option>, ///

A tag value or values for which you want to return all matching cluster security groups that are associated with the specified tag value or values. For example, suppose that you have security groups that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the security groups that have either or both of these tag values associated with them.

pub tag_values: Option>, } /// Serialize `DescribeClusterSecurityGroupsMessage` contents to a `SignedRequest`. struct DescribeClusterSecurityGroupsMessageSerializer; impl DescribeClusterSecurityGroupsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeClusterSecurityGroupsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.cluster_security_group_name { params.put( &format!("{}{}", prefix, "ClusterSecurityGroupName"), &field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.tag_keys { TagKeyListSerializer::serialize( params, &format!("{}{}", prefix, "TagKey"), field_value, ); } if let Some(ref field_value) = obj.tag_values { TagValueListSerializer::serialize( params, &format!("{}{}", prefix, "TagValue"), field_value, ); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeClusterSnapshotsMessage { ///

A value that indicates whether to return snapshots only for an existing cluster. You can perform table-level restore only by using a snapshot of an existing cluster, that is, a cluster that has not been deleted. Values for this parameter work as follows:

  • If ClusterExists is set to true, ClusterIdentifier is required.

  • If ClusterExists is set to false and ClusterIdentifier isn't specified, all snapshots associated with deleted clusters (orphaned snapshots) are returned.

  • If ClusterExists is set to false and ClusterIdentifier is specified for a deleted cluster, snapshots associated with that cluster are returned.

  • If ClusterExists is set to false and ClusterIdentifier is specified for an existing cluster, no snapshots are returned.

pub cluster_exists: Option, ///

The identifier of the cluster which generated the requested snapshots.

pub cluster_identifier: Option, ///

A time value that requests only snapshots created at or before the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

Example: 2012-07-16T18:00:00Z

pub end_time: Option, ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSnapshots request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

The AWS customer account used to create or copy the snapshot. Use this field to filter the results to snapshots owned by a particular account. To describe snapshots you own, either specify your AWS customer account, or do not specify the parameter.

pub owner_account: Option, ///

The snapshot identifier of the snapshot about which to return information.

pub snapshot_identifier: Option, ///

The type of snapshots for which you are requesting information. By default, snapshots of all types are returned.

Valid Values: automated | manual

pub snapshot_type: Option, ///

pub sorting_entities: Option>, ///

A value that requests only snapshots created at or after the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

Example: 2012-07-16T18:00:00Z

pub start_time: Option, ///

A tag key or keys for which you want to return all matching cluster snapshots that are associated with the specified key or keys. For example, suppose that you have snapshots that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag keys associated with them.

pub tag_keys: Option>, ///

A tag value or values for which you want to return all matching cluster snapshots that are associated with the specified tag value or values. For example, suppose that you have snapshots that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag values associated with them.

pub tag_values: Option>, } /// Serialize `DescribeClusterSnapshotsMessage` contents to a `SignedRequest`. struct DescribeClusterSnapshotsMessageSerializer; impl DescribeClusterSnapshotsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeClusterSnapshotsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.cluster_exists { params.put(&format!("{}{}", prefix, "ClusterExists"), &field_value); } if let Some(ref field_value) = obj.cluster_identifier { params.put(&format!("{}{}", prefix, "ClusterIdentifier"), &field_value); } if let Some(ref field_value) = obj.end_time { params.put(&format!("{}{}", prefix, "EndTime"), &field_value); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.owner_account { params.put(&format!("{}{}", prefix, "OwnerAccount"), &field_value); } if let Some(ref field_value) = obj.snapshot_identifier { params.put(&format!("{}{}", prefix, "SnapshotIdentifier"), &field_value); } if let Some(ref field_value) = obj.snapshot_type { params.put(&format!("{}{}", prefix, "SnapshotType"), &field_value); } if let Some(ref field_value) = obj.sorting_entities { SnapshotSortingEntityListSerializer::serialize( params, &format!("{}{}", prefix, "SnapshotSortingEntity"), field_value, ); } if let Some(ref field_value) = obj.start_time { params.put(&format!("{}{}", prefix, "StartTime"), &field_value); } if let Some(ref field_value) = obj.tag_keys { TagKeyListSerializer::serialize( params, &format!("{}{}", prefix, "TagKey"), field_value, ); } if let Some(ref field_value) = obj.tag_values { TagValueListSerializer::serialize( params, &format!("{}{}", prefix, "TagValue"), field_value, ); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeClusterSubnetGroupsMessage { ///

The name of the cluster subnet group for which information is requested.

pub cluster_subnet_group_name: Option, ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSubnetGroups request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

A tag key or keys for which you want to return all matching cluster subnet groups that are associated with the specified key or keys. For example, suppose that you have subnet groups that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag keys associated with them.

pub tag_keys: Option>, ///

A tag value or values for which you want to return all matching cluster subnet groups that are associated with the specified tag value or values. For example, suppose that you have subnet groups that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag values associated with them.

pub tag_values: Option>, } /// Serialize `DescribeClusterSubnetGroupsMessage` contents to a `SignedRequest`. struct DescribeClusterSubnetGroupsMessageSerializer; impl DescribeClusterSubnetGroupsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeClusterSubnetGroupsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.cluster_subnet_group_name { params.put( &format!("{}{}", prefix, "ClusterSubnetGroupName"), &field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.tag_keys { TagKeyListSerializer::serialize( params, &format!("{}{}", prefix, "TagKey"), field_value, ); } if let Some(ref field_value) = obj.tag_values { TagValueListSerializer::serialize( params, &format!("{}{}", prefix, "TagValue"), field_value, ); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeClusterTracksMessage { ///

The name of the maintenance track.

pub maintenance_track_name: Option, ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterTracks request exceed the value specified in MaxRecords, Amazon Redshift returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

An integer value for the maximum number of maintenance tracks to return.

pub max_records: Option, } /// Serialize `DescribeClusterTracksMessage` contents to a `SignedRequest`. struct DescribeClusterTracksMessageSerializer; impl DescribeClusterTracksMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeClusterTracksMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.maintenance_track_name { params.put( &format!("{}{}", prefix, "MaintenanceTrackName"), &field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeClusterVersionsMessage { ///

The name of a specific cluster parameter group family to return details for.

Constraints:

  • Must be 1 to 255 alphanumeric characters

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

pub cluster_parameter_group_family: Option, ///

The specific cluster version to return.

Example: 1.0

pub cluster_version: Option, ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterVersions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, } /// Serialize `DescribeClusterVersionsMessage` contents to a `SignedRequest`. struct DescribeClusterVersionsMessageSerializer; impl DescribeClusterVersionsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeClusterVersionsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.cluster_parameter_group_family { params.put( &format!("{}{}", prefix, "ClusterParameterGroupFamily"), &field_value, ); } if let Some(ref field_value) = obj.cluster_version { params.put(&format!("{}{}", prefix, "ClusterVersion"), &field_value); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeClustersMessage { ///

The unique identifier of a cluster whose properties you are requesting. This parameter is case sensitive.

The default is that all clusters defined for an account are returned.

pub cluster_identifier: Option, ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusters request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

Constraints: You can specify either the ClusterIdentifier parameter or the Marker parameter, but not both.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

A tag key or keys for which you want to return all matching clusters that are associated with the specified key or keys. For example, suppose that you have clusters that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the clusters that have either or both of these tag keys associated with them.

pub tag_keys: Option>, ///

A tag value or values for which you want to return all matching clusters that are associated with the specified tag value or values. For example, suppose that you have clusters that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the clusters that have either or both of these tag values associated with them.

pub tag_values: Option>, } /// Serialize `DescribeClustersMessage` contents to a `SignedRequest`. struct DescribeClustersMessageSerializer; impl DescribeClustersMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeClustersMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.cluster_identifier { params.put(&format!("{}{}", prefix, "ClusterIdentifier"), &field_value); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.tag_keys { TagKeyListSerializer::serialize( params, &format!("{}{}", prefix, "TagKey"), field_value, ); } if let Some(ref field_value) = obj.tag_values { TagValueListSerializer::serialize( params, &format!("{}{}", prefix, "TagValue"), field_value, ); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeDefaultClusterParametersMessage { ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDefaultClusterParameters request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

The name of the cluster parameter group family.

pub parameter_group_family: String, } /// Serialize `DescribeDefaultClusterParametersMessage` contents to a `SignedRequest`. struct DescribeDefaultClusterParametersMessageSerializer; impl DescribeDefaultClusterParametersMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeDefaultClusterParametersMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } params.put( &format!("{}{}", prefix, "ParameterGroupFamily"), &obj.parameter_group_family, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct DescribeDefaultClusterParametersResult { pub default_cluster_parameters: Option, } #[allow(dead_code)] struct DescribeDefaultClusterParametersResultDeserializer; impl DescribeDefaultClusterParametersResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, DescribeDefaultClusterParametersResult, _>( tag_name, stack, |name, stack, obj| { match name { "DefaultClusterParameters" => { obj.default_cluster_parameters = Some(DefaultClusterParametersDeserializer::deserialize( "DefaultClusterParameters", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeEventCategoriesMessage { ///

The source type, such as cluster or parameter group, to which the described event categories apply.

Valid values: cluster, cluster-snapshot, cluster-parameter-group, cluster-security-group, and scheduled-action.

pub source_type: Option, } /// Serialize `DescribeEventCategoriesMessage` contents to a `SignedRequest`. struct DescribeEventCategoriesMessageSerializer; impl DescribeEventCategoriesMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeEventCategoriesMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.source_type { params.put(&format!("{}{}", prefix, "SourceType"), &field_value); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeEventSubscriptionsMessage { ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEventSubscriptions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

The name of the Amazon Redshift event notification subscription to be described.

pub subscription_name: Option, ///

A tag key or keys for which you want to return all matching event notification subscriptions that are associated with the specified key or keys. For example, suppose that you have subscriptions that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the subscriptions that have either or both of these tag keys associated with them.

pub tag_keys: Option>, ///

A tag value or values for which you want to return all matching event notification subscriptions that are associated with the specified tag value or values. For example, suppose that you have subscriptions that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the subscriptions that have either or both of these tag values associated with them.

pub tag_values: Option>, } /// Serialize `DescribeEventSubscriptionsMessage` contents to a `SignedRequest`. struct DescribeEventSubscriptionsMessageSerializer; impl DescribeEventSubscriptionsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeEventSubscriptionsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.subscription_name { params.put(&format!("{}{}", prefix, "SubscriptionName"), &field_value); } if let Some(ref field_value) = obj.tag_keys { TagKeyListSerializer::serialize( params, &format!("{}{}", prefix, "TagKey"), field_value, ); } if let Some(ref field_value) = obj.tag_values { TagValueListSerializer::serialize( params, &format!("{}{}", prefix, "TagValue"), field_value, ); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeEventsMessage { ///

The number of minutes prior to the time of the request for which to retrieve events. For example, if the request is sent at 18:00 and you specify a duration of 60, then only events which have occurred after 17:00 will be returned.

Default: 60

pub duration: Option, ///

The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

Example: 2009-07-08T18:00Z

pub end_time: Option, ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEvents request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

The identifier of the event source for which events will be returned. If this parameter is not specified, then all sources are included in the response.

Constraints:

If SourceIdentifier is supplied, SourceType must also be provided.

  • Specify a cluster identifier when SourceType is cluster.

  • Specify a cluster security group name when SourceType is cluster-security-group.

  • Specify a cluster parameter group name when SourceType is cluster-parameter-group.

  • Specify a cluster snapshot identifier when SourceType is cluster-snapshot.

pub source_identifier: Option, ///

The event source to retrieve events for. If no value is specified, all events are returned.

Constraints:

If SourceType is supplied, SourceIdentifier must also be provided.

  • Specify cluster when SourceIdentifier is a cluster identifier.

  • Specify cluster-security-group when SourceIdentifier is a cluster security group name.

  • Specify cluster-parameter-group when SourceIdentifier is a cluster parameter group name.

  • Specify cluster-snapshot when SourceIdentifier is a cluster snapshot identifier.

pub source_type: Option, ///

The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

Example: 2009-07-08T18:00Z

pub start_time: Option, } /// Serialize `DescribeEventsMessage` contents to a `SignedRequest`. struct DescribeEventsMessageSerializer; impl DescribeEventsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeEventsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.duration { params.put(&format!("{}{}", prefix, "Duration"), &field_value); } if let Some(ref field_value) = obj.end_time { params.put(&format!("{}{}", prefix, "EndTime"), &field_value); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.source_identifier { params.put(&format!("{}{}", prefix, "SourceIdentifier"), &field_value); } if let Some(ref field_value) = obj.source_type { params.put(&format!("{}{}", prefix, "SourceType"), &field_value); } if let Some(ref field_value) = obj.start_time { params.put(&format!("{}{}", prefix, "StartTime"), &field_value); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeHsmClientCertificatesMessage { ///

The identifier of a specific HSM client certificate for which you want information. If no identifier is specified, information is returned for all HSM client certificates owned by your AWS customer account.

pub hsm_client_certificate_identifier: Option, ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmClientCertificates request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

A tag key or keys for which you want to return all matching HSM client certificates that are associated with the specified key or keys. For example, suppose that you have HSM client certificates that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag keys associated with them.

pub tag_keys: Option>, ///

A tag value or values for which you want to return all matching HSM client certificates that are associated with the specified tag value or values. For example, suppose that you have HSM client certificates that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag values associated with them.

pub tag_values: Option>, } /// Serialize `DescribeHsmClientCertificatesMessage` contents to a `SignedRequest`. struct DescribeHsmClientCertificatesMessageSerializer; impl DescribeHsmClientCertificatesMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeHsmClientCertificatesMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.hsm_client_certificate_identifier { params.put( &format!("{}{}", prefix, "HsmClientCertificateIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.tag_keys { TagKeyListSerializer::serialize( params, &format!("{}{}", prefix, "TagKey"), field_value, ); } if let Some(ref field_value) = obj.tag_values { TagValueListSerializer::serialize( params, &format!("{}{}", prefix, "TagValue"), field_value, ); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeHsmConfigurationsMessage { ///

The identifier of a specific Amazon Redshift HSM configuration to be described. If no identifier is specified, information is returned for all HSM configurations owned by your AWS customer account.

pub hsm_configuration_identifier: Option, ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmConfigurations request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

A tag key or keys for which you want to return all matching HSM configurations that are associated with the specified key or keys. For example, suppose that you have HSM configurations that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag keys associated with them.

pub tag_keys: Option>, ///

A tag value or values for which you want to return all matching HSM configurations that are associated with the specified tag value or values. For example, suppose that you have HSM configurations that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag values associated with them.

pub tag_values: Option>, } /// Serialize `DescribeHsmConfigurationsMessage` contents to a `SignedRequest`. struct DescribeHsmConfigurationsMessageSerializer; impl DescribeHsmConfigurationsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeHsmConfigurationsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.hsm_configuration_identifier { params.put( &format!("{}{}", prefix, "HsmConfigurationIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.tag_keys { TagKeyListSerializer::serialize( params, &format!("{}{}", prefix, "TagKey"), field_value, ); } if let Some(ref field_value) = obj.tag_values { TagValueListSerializer::serialize( params, &format!("{}{}", prefix, "TagValue"), field_value, ); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeLoggingStatusMessage { ///

The identifier of the cluster from which to get the logging status.

Example: examplecluster

pub cluster_identifier: String, } /// Serialize `DescribeLoggingStatusMessage` contents to a `SignedRequest`. struct DescribeLoggingStatusMessageSerializer; impl DescribeLoggingStatusMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeLoggingStatusMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeNodeConfigurationOptionsMessage { ///

The action type to evaluate for possible node configurations. Specify "restore-cluster" to get configuration combinations based on an existing snapshot. Specify "recommend-node-config" to get configuration recommendations based on an existing cluster or snapshot. Specify "resize-cluster" to get configuration combinations for elastic resize based on an existing cluster.

pub action_type: String, ///

The identifier of the cluster to evaluate for possible node configurations.

pub cluster_identifier: Option, ///

A set of name, operator, and value items to filter the results.

pub filters: Option>, ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeNodeConfigurationOptions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 500

Constraints: minimum 100, maximum 500.

pub max_records: Option, ///

The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.

pub owner_account: Option, ///

The identifier of the snapshot to evaluate for possible node configurations.

pub snapshot_identifier: Option, } /// Serialize `DescribeNodeConfigurationOptionsMessage` contents to a `SignedRequest`. struct DescribeNodeConfigurationOptionsMessageSerializer; impl DescribeNodeConfigurationOptionsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeNodeConfigurationOptionsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put(&format!("{}{}", prefix, "ActionType"), &obj.action_type); if let Some(ref field_value) = obj.cluster_identifier { params.put(&format!("{}{}", prefix, "ClusterIdentifier"), &field_value); } if let Some(ref field_value) = obj.filters { NodeConfigurationOptionsFilterListSerializer::serialize( params, &format!("{}{}", prefix, "NodeConfigurationOptionsFilter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.owner_account { params.put(&format!("{}{}", prefix, "OwnerAccount"), &field_value); } if let Some(ref field_value) = obj.snapshot_identifier { params.put(&format!("{}{}", prefix, "SnapshotIdentifier"), &field_value); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeOrderableClusterOptionsMessage { ///

The version filter value. Specify this parameter to show only the available offerings matching the specified version.

Default: All versions.

Constraints: Must be one of the version returned from DescribeClusterVersions.

pub cluster_version: Option, ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeOrderableClusterOptions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

The node type filter value. Specify this parameter to show only the available offerings matching the specified node type.

pub node_type: Option, } /// Serialize `DescribeOrderableClusterOptionsMessage` contents to a `SignedRequest`. struct DescribeOrderableClusterOptionsMessageSerializer; impl DescribeOrderableClusterOptionsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeOrderableClusterOptionsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.cluster_version { params.put(&format!("{}{}", prefix, "ClusterVersion"), &field_value); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.node_type { params.put(&format!("{}{}", prefix, "NodeType"), &field_value); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeReservedNodeOfferingsMessage { ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeReservedNodeOfferings request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

The unique identifier for the offering.

pub reserved_node_offering_id: Option, } /// Serialize `DescribeReservedNodeOfferingsMessage` contents to a `SignedRequest`. struct DescribeReservedNodeOfferingsMessageSerializer; impl DescribeReservedNodeOfferingsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeReservedNodeOfferingsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.reserved_node_offering_id { params.put( &format!("{}{}", prefix, "ReservedNodeOfferingId"), &field_value, ); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeReservedNodesMessage { ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeReservedNodes request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

Identifier for the node reservation.

pub reserved_node_id: Option, } /// Serialize `DescribeReservedNodesMessage` contents to a `SignedRequest`. struct DescribeReservedNodesMessageSerializer; impl DescribeReservedNodesMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeReservedNodesMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.reserved_node_id { params.put(&format!("{}{}", prefix, "ReservedNodeId"), &field_value); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeResizeMessage { ///

The unique identifier of a cluster whose resize progress you are requesting. This parameter is case-sensitive.

By default, resize operations for all clusters defined for an AWS account are returned.

pub cluster_identifier: String, } /// Serialize `DescribeResizeMessage` contents to a `SignedRequest`. struct DescribeResizeMessageSerializer; impl DescribeResizeMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeResizeMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeScheduledActionsMessage { ///

If true, retrieve only active scheduled actions. If false, retrieve only disabled scheduled actions.

pub active: Option, ///

The end time in UTC of the scheduled action to retrieve. Only active scheduled actions that have invocations before this time are retrieved.

pub end_time: Option, ///

List of scheduled action filters.

pub filters: Option>, ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeScheduledActions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

The name of the scheduled action to retrieve.

pub scheduled_action_name: Option, ///

The start time in UTC of the scheduled actions to retrieve. Only active scheduled actions that have invocations after this time are retrieved.

pub start_time: Option, ///

The type of the scheduled actions to retrieve.

pub target_action_type: Option, } /// Serialize `DescribeScheduledActionsMessage` contents to a `SignedRequest`. struct DescribeScheduledActionsMessageSerializer; impl DescribeScheduledActionsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeScheduledActionsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.active { params.put(&format!("{}{}", prefix, "Active"), &field_value); } if let Some(ref field_value) = obj.end_time { params.put(&format!("{}{}", prefix, "EndTime"), &field_value); } if let Some(ref field_value) = obj.filters { ScheduledActionFilterListSerializer::serialize( params, &format!("{}{}", prefix, "ScheduledActionFilter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.scheduled_action_name { params.put( &format!("{}{}", prefix, "ScheduledActionName"), &field_value, ); } if let Some(ref field_value) = obj.start_time { params.put(&format!("{}{}", prefix, "StartTime"), &field_value); } if let Some(ref field_value) = obj.target_action_type { params.put(&format!("{}{}", prefix, "TargetActionType"), &field_value); } } } ///

The result of the DescribeSnapshotCopyGrants action.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeSnapshotCopyGrantsMessage { ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeSnapshotCopyGrant request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

Constraints: You can specify either the SnapshotCopyGrantName parameter or the Marker parameter, but not both.

pub marker: Option, ///

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

pub max_records: Option, ///

The name of the snapshot copy grant.

pub snapshot_copy_grant_name: Option, ///

/// A tag key or keys for which you want to return all matching resources
/// that are associated with the specified key or keys.
pub tag_keys: Option<Vec<String>>,
    /// A tag value or values for which you want to return all matching
    /// resources that are associated with the specified value or values.
    pub tag_values: Option<Vec<String>>,
}

/// Serialize `DescribeSnapshotCopyGrantsMessage` contents to a `SignedRequest`.
struct DescribeSnapshotCopyGrantsMessageSerializer;
impl DescribeSnapshotCopyGrantsMessageSerializer {
    fn serialize(params: &mut Params, name: &str, obj: &DescribeSnapshotCopyGrantsMessage) {
        let mut prefix = name.to_string();
        if prefix != "" {
            prefix.push_str(".");
        }
        if let Some(ref field_value) = obj.marker {
            params.put(&format!("{}{}", prefix, "Marker"), &field_value);
        }
        if let Some(ref field_value) = obj.max_records {
            params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value);
        }
        if let Some(ref field_value) = obj.snapshot_copy_grant_name {
            params.put(
                &format!("{}{}", prefix, "SnapshotCopyGrantName"),
                &field_value,
            );
        }
        if let Some(ref field_value) = obj.tag_keys {
            TagKeyListSerializer::serialize(
                params,
                &format!("{}{}", prefix, "TagKey"),
                field_value,
            );
        }
        if let Some(ref field_value) = obj.tag_values {
            TagValueListSerializer::serialize(
                params,
                &format!("{}{}", prefix, "TagValue"),
                field_value,
            );
        }
    }
}

#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribeSnapshotSchedulesMessage {
    /// The unique identifier for the cluster whose snapshot schedules you want to view.
    pub cluster_identifier: Option<String>,
    /// Pagination marker: the starting point for the next set of response records.
    pub marker: Option<String>,
    /// The maximum number of response records to return in each call.
    pub max_records: Option<i64>,
    /// A unique identifier for a snapshot schedule.
    pub schedule_identifier: Option<String>,
    /// The key value for a snapshot schedule tag.
    pub tag_keys: Option<Vec<String>>,
    /// The value corresponding to the key of the snapshot schedule tag.
    pub tag_values: Option<Vec<String>>,
}

/// Serialize `DescribeSnapshotSchedulesMessage` contents to a `SignedRequest`.
struct DescribeSnapshotSchedulesMessageSerializer;
impl DescribeSnapshotSchedulesMessageSerializer {
    fn serialize(params: &mut Params, name: &str, obj: &DescribeSnapshotSchedulesMessage) {
        let mut prefix = name.to_string();
        if prefix != "" {
            prefix.push_str(".");
        }
        if let Some(ref field_value) = obj.cluster_identifier {
            params.put(&format!("{}{}", prefix, "ClusterIdentifier"), &field_value);
        }
        if let Some(ref field_value) = obj.marker {
            params.put(&format!("{}{}", prefix, "Marker"), &field_value);
        }
        if let Some(ref field_value) = obj.max_records {
            params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value);
        }
        if let Some(ref field_value) = obj.schedule_identifier {
            params.put(&format!("{}{}", prefix, "ScheduleIdentifier"), &field_value);
        }
        if let Some(ref field_value) = obj.tag_keys {
            TagKeyListSerializer::serialize(
                params,
                &format!("{}{}", prefix, "TagKey"),
                field_value,
            );
        }
        if let Some(ref field_value) = obj.tag_values {
            TagValueListSerializer::serialize(
                params,
                &format!("{}{}", prefix, "TagValue"),
                field_value,
            );
        }
    }
}

#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
pub struct DescribeSnapshotSchedulesOutputMessage {
    ///

/// A value that indicates the starting point for the next set of response
/// records in a subsequent request; empty when all records have been retrieved.
pub marker: Option<String>,
    /// A list of SnapshotSchedules.
    pub snapshot_schedules: Option<Vec<SnapshotSchedule>>,
}

#[allow(dead_code)]
struct DescribeSnapshotSchedulesOutputMessageDeserializer;
impl DescribeSnapshotSchedulesOutputMessageDeserializer {
    #[allow(dead_code, unused_variables)]
    fn deserialize<T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<DescribeSnapshotSchedulesOutputMessage, XmlParseError> {
        deserialize_elements::<_, DescribeSnapshotSchedulesOutputMessage, _>(
            tag_name,
            stack,
            |name, stack, obj| {
                match name {
                    "Marker" => {
                        obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?);
                    }
                    "SnapshotSchedules" => {
                        obj.snapshot_schedules.get_or_insert(vec![]).extend(
                            SnapshotScheduleListDeserializer::deserialize(
                                "SnapshotSchedules",
                                stack,
                            )?,
                        );
                    }
                    _ => skip_tree(stack),
                }
                Ok(())
            },
        )
    }
}
///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeTableRestoreStatusMessage { ///

The Amazon Redshift cluster that the table is being restored to.

pub cluster_identifier: Option, ///

An optional pagination token provided by a previous DescribeTableRestoreStatus request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by the MaxRecords parameter.

pub marker: Option, ///

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

pub max_records: Option, ///

The identifier of the table restore request to return status for. If you don't specify a TableRestoreRequestId value, then DescribeTableRestoreStatus returns the status of all in-progress table restore requests.

pub table_restore_request_id: Option, } /// Serialize `DescribeTableRestoreStatusMessage` contents to a `SignedRequest`. struct DescribeTableRestoreStatusMessageSerializer; impl DescribeTableRestoreStatusMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeTableRestoreStatusMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.cluster_identifier { params.put(&format!("{}{}", prefix, "ClusterIdentifier"), &field_value); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.table_restore_request_id { params.put( &format!("{}{}", prefix, "TableRestoreRequestId"), &field_value, ); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeTagsMessage { ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the marker parameter and retrying the command. If the marker field is empty, all response records have been retrieved for the request.

pub marker: Option, ///

The maximum number or response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

pub max_records: Option, ///

The Amazon Resource Name (ARN) for which you want to describe the tag or tags. For example, arn:aws:redshift:us-east-2:123456789:cluster:t1.

pub resource_name: Option, ///

The type of resource with which you want to view tags. Valid resource types are:

  • Cluster

  • CIDR/IP

  • EC2 security group

  • Snapshot

  • Cluster security group

  • Subnet group

  • HSM connection

  • HSM certificate

  • Parameter group

  • Snapshot copy grant

For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide.

pub resource_type: Option, ///

A tag key or keys for which you want to return all matching resources that are associated with the specified key or keys. For example, suppose that you have resources tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with all resources that have either or both of these tag keys associated with them.

pub tag_keys: Option>, ///

A tag value or values for which you want to return all matching resources that are associated with the specified value or values. For example, suppose that you have resources tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with all resources that have either or both of these tag values associated with them.

pub tag_values: Option>, } /// Serialize `DescribeTagsMessage` contents to a `SignedRequest`. struct DescribeTagsMessageSerializer; impl DescribeTagsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeTagsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } if let Some(ref field_value) = obj.resource_name { params.put(&format!("{}{}", prefix, "ResourceName"), &field_value); } if let Some(ref field_value) = obj.resource_type { params.put(&format!("{}{}", prefix, "ResourceType"), &field_value); } if let Some(ref field_value) = obj.tag_keys { TagKeyListSerializer::serialize( params, &format!("{}{}", prefix, "TagKey"), field_value, ); } if let Some(ref field_value) = obj.tag_values { TagValueListSerializer::serialize( params, &format!("{}{}", prefix, "TagValue"), field_value, ); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DescribeUsageLimitsMessage { ///

/// The identifier of the cluster for which you want to describe usage limits.
pub cluster_identifier: Option<String>,
    /// The feature type for which you want to describe usage limits.
    pub feature_type: Option<String>,
    /// An optional parameter that specifies the starting point to return a
    /// set of response records.
    pub marker: Option<String>,
    /// The maximum number of response records to return in each call.
    /// Default: 100. Constraints: minimum 20, maximum 100.
    pub max_records: Option<i64>,
    /// A tag key or keys for which you want to return all matching usage limit objects.
    pub tag_keys: Option<Vec<String>>,
    /// A tag value or values for which you want to return all matching usage limit objects.
    pub tag_values: Option<Vec<String>>,
    /// The identifier of the usage limit to describe.
    pub usage_limit_id: Option<String>,
}

/// Serialize `DescribeUsageLimitsMessage` contents to a `SignedRequest`.
struct DescribeUsageLimitsMessageSerializer;
impl DescribeUsageLimitsMessageSerializer {
    fn serialize(params: &mut Params, name: &str, obj: &DescribeUsageLimitsMessage) {
        let mut prefix = name.to_string();
        if prefix != "" {
            prefix.push_str(".");
        }
        if let Some(ref field_value) = obj.cluster_identifier {
            params.put(&format!("{}{}", prefix, "ClusterIdentifier"), &field_value);
        }
        if let Some(ref field_value) = obj.feature_type {
            params.put(&format!("{}{}", prefix, "FeatureType"), &field_value);
        }
        if let Some(ref field_value) = obj.marker {
            params.put(&format!("{}{}", prefix, "Marker"), &field_value);
        }
        if let Some(ref field_value) = obj.max_records {
            params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value);
        }
        if let Some(ref field_value) = obj.tag_keys {
            TagKeyListSerializer::serialize(
                params,
                &format!("{}{}", prefix, "TagKey"),
                field_value,
            );
        }
        if let Some(ref field_value) = obj.tag_values {
            TagValueListSerializer::serialize(
                params,
                &format!("{}{}", prefix, "TagValue"),
                field_value,
            );
        }
        if let Some(ref field_value) = obj.usage_limit_id {
            params.put(&format!("{}{}", prefix, "UsageLimitId"), &field_value);
        }
    }
}
///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DisableLoggingMessage { ///

The identifier of the cluster on which logging is to be stopped.

Example: examplecluster

pub cluster_identifier: String, } /// Serialize `DisableLoggingMessage` contents to a `SignedRequest`. struct DisableLoggingMessageSerializer; impl DisableLoggingMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DisableLoggingMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct DisableSnapshotCopyMessage { ///

The unique identifier of the source cluster that you want to disable copying of snapshots to a destination region.

Constraints: Must be the valid name of an existing cluster that has cross-region snapshot copy enabled.

pub cluster_identifier: String, } /// Serialize `DisableSnapshotCopyMessage` contents to a `SignedRequest`. struct DisableSnapshotCopyMessageSerializer; impl DisableSnapshotCopyMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DisableSnapshotCopyMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct DisableSnapshotCopyResult { pub cluster: Option, } #[allow(dead_code)] struct DisableSnapshotCopyResultDeserializer; impl DisableSnapshotCopyResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, DisableSnapshotCopyResult, _>( tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[allow(dead_code)] struct DoubleDeserializer; impl DoubleDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, |s| Ok(f64::from_str(&s).unwrap())) } } #[allow(dead_code)] struct DoubleOptionalDeserializer; impl DoubleOptionalDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, |s| Ok(f64::from_str(&s).unwrap())) } } ///

Describes an Amazon EC2 security group.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct EC2SecurityGroup { ///

The name of the EC2 Security Group.

pub ec2_security_group_name: Option, ///

The AWS ID of the owner of the EC2 security group specified in the EC2SecurityGroupName field.

pub ec2_security_group_owner_id: Option, ///

The status of the EC2 security group.

pub status: Option, ///

The list of tags for the EC2 security group.

pub tags: Option>, } #[allow(dead_code)] struct EC2SecurityGroupDeserializer; impl EC2SecurityGroupDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, EC2SecurityGroup, _>(tag_name, stack, |name, stack, obj| { match name { "EC2SecurityGroupName" => { obj.ec2_security_group_name = Some(StringDeserializer::deserialize( "EC2SecurityGroupName", stack, )?); } "EC2SecurityGroupOwnerId" => { obj.ec2_security_group_owner_id = Some(StringDeserializer::deserialize( "EC2SecurityGroupOwnerId", stack, )?); } "Status" => { obj.status = Some(StringDeserializer::deserialize("Status", stack)?); } "Tags" => { obj.tags .get_or_insert(vec![]) .extend(TagListDeserializer::deserialize("Tags", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct EC2SecurityGroupListDeserializer; impl EC2SecurityGroupListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "EC2SecurityGroup" { obj.push(EC2SecurityGroupDeserializer::deserialize( "EC2SecurityGroup", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

Describes the status of the elastic IP (EIP) address.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ElasticIpStatus { ///

The elastic IP (EIP) address for the cluster.

pub elastic_ip: Option, ///

The status of the elastic IP (EIP) address.

pub status: Option, } #[allow(dead_code)] struct ElasticIpStatusDeserializer; impl ElasticIpStatusDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ElasticIpStatus, _>(tag_name, stack, |name, stack, obj| { match name { "ElasticIp" => { obj.elastic_ip = Some(StringDeserializer::deserialize("ElasticIp", stack)?); } "Status" => { obj.status = Some(StringDeserializer::deserialize("Status", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct EligibleTracksToUpdateListDeserializer; impl EligibleTracksToUpdateListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "UpdateTarget" { obj.push(UpdateTargetDeserializer::deserialize( "UpdateTarget", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct EnableLoggingMessage { ///

The name of an existing S3 bucket where the log files are to be stored.

Constraints:

  • Must be in the same region as the cluster

  • The cluster must have read bucket and put object permissions

pub bucket_name: String, ///

The identifier of the cluster on which logging is to be started.

Example: examplecluster

pub cluster_identifier: String, ///

The prefix applied to the log file names.

Constraints:

  • Cannot exceed 512 characters

  • Cannot contain spaces( ), double quotes ("), single quotes ('), a backslash (), or control characters. The hexadecimal codes for invalid characters are:

    • x00 to x20

    • x22

    • x27

    • x5c

    • x7f or larger

pub s3_key_prefix: Option, } /// Serialize `EnableLoggingMessage` contents to a `SignedRequest`. struct EnableLoggingMessageSerializer; impl EnableLoggingMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &EnableLoggingMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put(&format!("{}{}", prefix, "BucketName"), &obj.bucket_name); params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); if let Some(ref field_value) = obj.s3_key_prefix { params.put(&format!("{}{}", prefix, "S3KeyPrefix"), &field_value); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct EnableSnapshotCopyMessage { ///

The unique identifier of the source cluster to copy snapshots from.

Constraints: Must be the valid name of an existing cluster that does not already have cross-region snapshot copy enabled.

pub cluster_identifier: String, ///

The destination AWS Region that you want to copy snapshots to.

Constraints: Must be the name of a valid AWS Region. For more information, see Regions and Endpoints in the Amazon Web Services General Reference.

pub destination_region: String, ///

The number of days to retain newly copied snapshots in the destination AWS Region after they are copied from the source AWS Region. If the value is -1, the manual snapshot is retained indefinitely.

The value must be either -1 or an integer between 1 and 3,653.

pub manual_snapshot_retention_period: Option, ///

The number of days to retain automated snapshots in the destination region after they are copied from the source region.

Default: 7.

Constraints: Must be at least 1 and no more than 35.

pub retention_period: Option, ///

The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted cluster are copied to the destination region.

pub snapshot_copy_grant_name: Option, } /// Serialize `EnableSnapshotCopyMessage` contents to a `SignedRequest`. struct EnableSnapshotCopyMessageSerializer; impl EnableSnapshotCopyMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &EnableSnapshotCopyMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); params.put( &format!("{}{}", prefix, "DestinationRegion"), &obj.destination_region, ); if let Some(ref field_value) = obj.manual_snapshot_retention_period { params.put( &format!("{}{}", prefix, "ManualSnapshotRetentionPeriod"), &field_value, ); } if let Some(ref field_value) = obj.retention_period { params.put(&format!("{}{}", prefix, "RetentionPeriod"), &field_value); } if let Some(ref field_value) = obj.snapshot_copy_grant_name { params.put( &format!("{}{}", prefix, "SnapshotCopyGrantName"), &field_value, ); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct EnableSnapshotCopyResult { pub cluster: Option, } #[allow(dead_code)] struct EnableSnapshotCopyResultDeserializer; impl EnableSnapshotCopyResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, EnableSnapshotCopyResult, _>( tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

Describes a connection endpoint.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct Endpoint { ///

The DNS address of the Cluster.

pub address: Option, ///

The port that the database engine is listening on.

pub port: Option, } #[allow(dead_code)] struct EndpointDeserializer; impl EndpointDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, Endpoint, _>(tag_name, stack, |name, stack, obj| { match name { "Address" => { obj.address = Some(StringDeserializer::deserialize("Address", stack)?); } "Port" => { obj.port = Some(IntegerDeserializer::deserialize("Port", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

Describes an event.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct Event { ///

The date and time of the event.

pub date: Option, ///

A list of the event categories.

Values: Configuration, Management, Monitoring, Security

pub event_categories: Option>, ///

The identifier of the event.

pub event_id: Option, ///

The text of this event.

pub message: Option, ///

The severity of the event.

Values: ERROR, INFO

pub severity: Option, ///

The identifier for the source of the event.

pub source_identifier: Option, ///

The source type for this event.

pub source_type: Option, } #[allow(dead_code)] struct EventDeserializer; impl EventDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { deserialize_elements::<_, Event, _>(tag_name, stack, |name, stack, obj| { match name { "Date" => { obj.date = Some(TStampDeserializer::deserialize("Date", stack)?); } "EventCategories" => { obj.event_categories.get_or_insert(vec![]).extend( EventCategoriesListDeserializer::deserialize("EventCategories", stack)?, ); } "EventId" => { obj.event_id = Some(StringDeserializer::deserialize("EventId", stack)?); } "Message" => { obj.message = Some(StringDeserializer::deserialize("Message", stack)?); } "Severity" => { obj.severity = Some(StringDeserializer::deserialize("Severity", stack)?); } "SourceIdentifier" => { obj.source_identifier = Some(StringDeserializer::deserialize("SourceIdentifier", stack)?); } "SourceType" => { obj.source_type = Some(SourceTypeDeserializer::deserialize("SourceType", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct EventCategoriesListDeserializer; impl EventCategoriesListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "EventCategory" { obj.push(StringDeserializer::deserialize("EventCategory", stack)?); } else { skip_tree(stack); } Ok(()) }) } } /// Serialize `EventCategoriesList` contents to a `SignedRequest`. struct EventCategoriesListSerializer; impl EventCategoriesListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } ///

Describes event categories.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct EventCategoriesMap { ///

The events in the event category.

pub events: Option>, ///

The source type, such as cluster or cluster-snapshot, that the returned categories belong to.

pub source_type: Option, } #[allow(dead_code)] struct EventCategoriesMapDeserializer; impl EventCategoriesMapDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, EventCategoriesMap, _>(tag_name, stack, |name, stack, obj| { match name { "Events" => { obj.events .get_or_insert(vec![]) .extend(EventInfoMapListDeserializer::deserialize("Events", stack)?); } "SourceType" => { obj.source_type = Some(StringDeserializer::deserialize("SourceType", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct EventCategoriesMapListDeserializer; impl EventCategoriesMapListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "EventCategoriesMap" { obj.push(EventCategoriesMapDeserializer::deserialize( "EventCategoriesMap", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct EventCategoriesMessage { ///

A list of event categories descriptions.

pub event_categories_map_list: Option>, } #[allow(dead_code)] struct EventCategoriesMessageDeserializer; impl EventCategoriesMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, EventCategoriesMessage, _>(tag_name, stack, |name, stack, obj| { match name { "EventCategoriesMapList" => { obj.event_categories_map_list.get_or_insert(vec![]).extend( EventCategoriesMapListDeserializer::deserialize( "EventCategoriesMapList", stack, )?, ); } _ => skip_tree(stack), } Ok(()) }) } } ///

Describes event information.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct EventInfoMap { ///

The category of an Amazon Redshift event.

pub event_categories: Option>, ///

The description of an Amazon Redshift event.

pub event_description: Option, ///

The identifier of an Amazon Redshift event.

pub event_id: Option, ///

The severity of the event.

Values: ERROR, INFO

pub severity: Option, } #[allow(dead_code)] struct EventInfoMapDeserializer; impl EventInfoMapDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, EventInfoMap, _>(tag_name, stack, |name, stack, obj| { match name { "EventCategories" => { obj.event_categories.get_or_insert(vec![]).extend( EventCategoriesListDeserializer::deserialize("EventCategories", stack)?, ); } "EventDescription" => { obj.event_description = Some(StringDeserializer::deserialize("EventDescription", stack)?); } "EventId" => { obj.event_id = Some(StringDeserializer::deserialize("EventId", stack)?); } "Severity" => { obj.severity = Some(StringDeserializer::deserialize("Severity", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct EventInfoMapListDeserializer; impl EventInfoMapListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "EventInfoMap" { obj.push(EventInfoMapDeserializer::deserialize( "EventInfoMap", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } #[allow(dead_code)] struct EventListDeserializer; impl EventListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "Event" { obj.push(EventDeserializer::deserialize("Event", stack)?); } else { skip_tree(stack); } Ok(()) }) } } ///

Describes event subscriptions.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct EventSubscription { ///

The name of the Amazon Redshift event notification subscription.

pub cust_subscription_id: Option, ///

The AWS customer account associated with the Amazon Redshift event notification subscription.

pub customer_aws_id: Option, ///

A boolean value indicating whether the subscription is enabled; true indicates that the subscription is enabled.

pub enabled: Option, ///

The list of Amazon Redshift event categories specified in the event notification subscription.

Values: Configuration, Management, Monitoring, Security

pub event_categories_list: Option>, ///

The event severity specified in the Amazon Redshift event notification subscription.

Values: ERROR, INFO

pub severity: Option, ///

The Amazon Resource Name (ARN) of the Amazon SNS topic used by the event notification subscription.

pub sns_topic_arn: Option, ///

A list of the sources that publish events to the Amazon Redshift event notification subscription.

pub source_ids_list: Option>, ///

The source type of the events returned by the Amazon Redshift event notification, such as cluster, cluster-snapshot, cluster-parameter-group, cluster-security-group, or scheduled-action.

pub source_type: Option, ///

The status of the Amazon Redshift event notification subscription.

Constraints:

  • Can be one of the following: active | no-permission | topic-not-exist

  • The status "no-permission" indicates that Amazon Redshift no longer has permission to post to the Amazon SNS topic. The status "topic-not-exist" indicates that the topic was deleted after the subscription was created.

pub status: Option, ///

The date and time the Amazon Redshift event notification subscription was created.

pub subscription_creation_time: Option, ///

The list of tags for the event subscription.

pub tags: Option>, } #[allow(dead_code)] struct EventSubscriptionDeserializer; impl EventSubscriptionDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, EventSubscription, _>(tag_name, stack, |name, stack, obj| { match name { "CustSubscriptionId" => { obj.cust_subscription_id = Some(StringDeserializer::deserialize( "CustSubscriptionId", stack, )?); } "CustomerAwsId" => { obj.customer_aws_id = Some(StringDeserializer::deserialize("CustomerAwsId", stack)?); } "Enabled" => { obj.enabled = Some(BooleanDeserializer::deserialize("Enabled", stack)?); } "EventCategoriesList" => { obj.event_categories_list.get_or_insert(vec![]).extend( EventCategoriesListDeserializer::deserialize("EventCategoriesList", stack)?, ); } "Severity" => { obj.severity = Some(StringDeserializer::deserialize("Severity", stack)?); } "SnsTopicArn" => { obj.sns_topic_arn = Some(StringDeserializer::deserialize("SnsTopicArn", stack)?); } "SourceIdsList" => { obj.source_ids_list.get_or_insert(vec![]).extend( SourceIdsListDeserializer::deserialize("SourceIdsList", stack)?, ); } "SourceType" => { obj.source_type = Some(StringDeserializer::deserialize("SourceType", stack)?); } "Status" => { obj.status = Some(StringDeserializer::deserialize("Status", stack)?); } "SubscriptionCreationTime" => { obj.subscription_creation_time = Some(TStampDeserializer::deserialize( "SubscriptionCreationTime", stack, )?); } "Tags" => { obj.tags .get_or_insert(vec![]) .extend(TagListDeserializer::deserialize("Tags", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct EventSubscriptionsListDeserializer; impl EventSubscriptionsListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "EventSubscription" { obj.push(EventSubscriptionDeserializer::deserialize( 
"EventSubscription", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct EventSubscriptionsMessage { ///

A list of event subscriptions.

pub event_subscriptions_list: Option>, ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, } #[allow(dead_code)] struct EventSubscriptionsMessageDeserializer; impl EventSubscriptionsMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, EventSubscriptionsMessage, _>( tag_name, stack, |name, stack, obj| { match name { "EventSubscriptionsList" => { obj.event_subscriptions_list.get_or_insert(vec![]).extend( EventSubscriptionsListDeserializer::deserialize( "EventSubscriptionsList", stack, )?, ); } "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct EventsMessage { ///

A list of Event instances.

pub events: Option>, ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, } #[allow(dead_code)] struct EventsMessageDeserializer; impl EventsMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, EventsMessage, _>(tag_name, stack, |name, stack, obj| { match name { "Events" => { obj.events .get_or_insert(vec![]) .extend(EventListDeserializer::deserialize("Events", stack)?); } "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

The request parameters to get cluster credentials.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct GetClusterCredentialsMessage { ///

Create a database user with the name specified for the user named in DbUser if one does not exist.

pub auto_create: Option, ///

The unique identifier of the cluster that contains the database for which your are requesting credentials. This parameter is case sensitive.

pub cluster_identifier: String, ///

A list of the names of existing database groups that the user named in DbUser will join for the current session, in addition to any group memberships for an existing user. If not specified, a new user is added only to PUBLIC.

Database group name constraints

  • Must be 1 to 64 alphanumeric characters or hyphens

  • Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen.

  • First character must be a letter.

  • Must not contain a colon ( : ) or slash ( / ).

  • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.

pub db_groups: Option>, ///

The name of a database that DbUser is authorized to log on to. If DbName is not specified, DbUser can log on to any existing database.

Constraints:

  • Must be 1 to 64 alphanumeric characters or hyphens

  • Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen.

  • First character must be a letter.

  • Must not contain a colon ( : ) or slash ( / ).

  • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.

pub db_name: Option, ///

The name of a database user. If a user name matching DbUser exists in the database, the temporary user credentials have the same permissions as the existing user. If DbUser doesn't exist in the database and Autocreate is True, a new user is created using the value for DbUser with PUBLIC permissions. If a database user matching the value for DbUser doesn't exist and Autocreate is False, then the command succeeds but the connection attempt will fail because the user doesn't exist in the database.

For more information, see CREATE USER in the Amazon Redshift Database Developer Guide.

Constraints:

  • Must be 1 to 64 alphanumeric characters or hyphens. The user name can't be PUBLIC.

  • Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen.

  • First character must be a letter.

  • Must not contain a colon ( : ) or slash ( / ).

  • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.

pub db_user: String, ///

The number of seconds until the returned temporary password expires.

Constraint: minimum 900, maximum 3600.

Default: 900

pub duration_seconds: Option, } /// Serialize `GetClusterCredentialsMessage` contents to a `SignedRequest`. struct GetClusterCredentialsMessageSerializer; impl GetClusterCredentialsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &GetClusterCredentialsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.auto_create { params.put(&format!("{}{}", prefix, "AutoCreate"), &field_value); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); if let Some(ref field_value) = obj.db_groups { DbGroupListSerializer::serialize( params, &format!("{}{}", prefix, "DbGroup"), field_value, ); } if let Some(ref field_value) = obj.db_name { params.put(&format!("{}{}", prefix, "DbName"), &field_value); } params.put(&format!("{}{}", prefix, "DbUser"), &obj.db_user); if let Some(ref field_value) = obj.duration_seconds { params.put(&format!("{}{}", prefix, "DurationSeconds"), &field_value); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct GetReservedNodeExchangeOfferingsInputMessage { ///

A value that indicates the starting point for the next set of ReservedNodeOfferings.

pub marker: Option, ///

An integer setting the maximum number of ReservedNodeOfferings to retrieve.

pub max_records: Option, ///

A string representing the node identifier for the DC1 Reserved Node to be exchanged.

pub reserved_node_id: String, } /// Serialize `GetReservedNodeExchangeOfferingsInputMessage` contents to a `SignedRequest`. struct GetReservedNodeExchangeOfferingsInputMessageSerializer; impl GetReservedNodeExchangeOfferingsInputMessageSerializer { fn serialize( params: &mut Params, name: &str, obj: &GetReservedNodeExchangeOfferingsInputMessage, ) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put(&format!("{}{}", prefix, "MaxRecords"), &field_value); } params.put( &format!("{}{}", prefix, "ReservedNodeId"), &obj.reserved_node_id, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct GetReservedNodeExchangeOfferingsOutputMessage { ///

An optional parameter that specifies the starting point for returning a set of response records. When the results of a GetReservedNodeExchangeOfferings request exceed the value specified in MaxRecords, Amazon Redshift returns a value in the marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the marker parameter and retrying the request.

pub marker: Option, ///

Returns an array of ReservedNodeOffering objects.

pub reserved_node_offerings: Option>, } #[allow(dead_code)] struct GetReservedNodeExchangeOfferingsOutputMessageDeserializer; impl GetReservedNodeExchangeOfferingsOutputMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, GetReservedNodeExchangeOfferingsOutputMessage, _>( tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "ReservedNodeOfferings" => { obj.reserved_node_offerings.get_or_insert(vec![]).extend( ReservedNodeOfferingListDeserializer::deserialize( "ReservedNodeOfferings", stack, )?, ); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

Returns information about an HSM client certificate. The certificate is stored in a secure Hardware Storage Module (HSM), and used by the Amazon Redshift cluster to encrypt data files.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct HsmClientCertificate { ///

The identifier of the HSM client certificate.

pub hsm_client_certificate_identifier: Option, ///

The public key that the Amazon Redshift cluster will use to connect to the HSM. You must register the public key in the HSM.

pub hsm_client_certificate_public_key: Option, ///

The list of tags for the HSM client certificate.

pub tags: Option>, } #[allow(dead_code)] struct HsmClientCertificateDeserializer; impl HsmClientCertificateDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, HsmClientCertificate, _>(tag_name, stack, |name, stack, obj| { match name { "HsmClientCertificateIdentifier" => { obj.hsm_client_certificate_identifier = Some(StringDeserializer::deserialize( "HsmClientCertificateIdentifier", stack, )?); } "HsmClientCertificatePublicKey" => { obj.hsm_client_certificate_public_key = Some(StringDeserializer::deserialize( "HsmClientCertificatePublicKey", stack, )?); } "Tags" => { obj.tags .get_or_insert(vec![]) .extend(TagListDeserializer::deserialize("Tags", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct HsmClientCertificateListDeserializer; impl HsmClientCertificateListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "HsmClientCertificate" { obj.push(HsmClientCertificateDeserializer::deserialize( "HsmClientCertificate", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct HsmClientCertificateMessage { ///

A list of the identifiers for one or more HSM client certificates used by Amazon Redshift clusters to store and retrieve database encryption keys in an HSM.

pub hsm_client_certificates: Option>, ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, } #[allow(dead_code)] struct HsmClientCertificateMessageDeserializer; impl HsmClientCertificateMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, HsmClientCertificateMessage, _>( tag_name, stack, |name, stack, obj| { match name { "HsmClientCertificates" => { obj.hsm_client_certificates.get_or_insert(vec![]).extend( HsmClientCertificateListDeserializer::deserialize( "HsmClientCertificates", stack, )?, ); } "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

Returns information about an HSM configuration, which is an object that describes to Amazon Redshift clusters the information they require to connect to an HSM where they can store database encryption keys.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct HsmConfiguration { ///

A text description of the HSM configuration.

pub description: Option, ///

The name of the Amazon Redshift HSM configuration.

pub hsm_configuration_identifier: Option, ///

The IP address that the Amazon Redshift cluster must use to access the HSM.

pub hsm_ip_address: Option, ///

The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.

pub hsm_partition_name: Option, ///

The list of tags for the HSM configuration.

pub tags: Option>, } #[allow(dead_code)] struct HsmConfigurationDeserializer; impl HsmConfigurationDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, HsmConfiguration, _>(tag_name, stack, |name, stack, obj| { match name { "Description" => { obj.description = Some(StringDeserializer::deserialize("Description", stack)?); } "HsmConfigurationIdentifier" => { obj.hsm_configuration_identifier = Some(StringDeserializer::deserialize( "HsmConfigurationIdentifier", stack, )?); } "HsmIpAddress" => { obj.hsm_ip_address = Some(StringDeserializer::deserialize("HsmIpAddress", stack)?); } "HsmPartitionName" => { obj.hsm_partition_name = Some(StringDeserializer::deserialize("HsmPartitionName", stack)?); } "Tags" => { obj.tags .get_or_insert(vec![]) .extend(TagListDeserializer::deserialize("Tags", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct HsmConfigurationListDeserializer; impl HsmConfigurationListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "HsmConfiguration" { obj.push(HsmConfigurationDeserializer::deserialize( "HsmConfiguration", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct HsmConfigurationMessage { ///

A list of HsmConfiguration objects.

pub hsm_configurations: Option>, ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, } #[allow(dead_code)] struct HsmConfigurationMessageDeserializer; impl HsmConfigurationMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, HsmConfigurationMessage, _>( tag_name, stack, |name, stack, obj| { match name { "HsmConfigurations" => { obj.hsm_configurations.get_or_insert(vec![]).extend( HsmConfigurationListDeserializer::deserialize( "HsmConfigurations", stack, )?, ); } "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

Describes the status of changes to HSM settings.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct HsmStatus { ///

Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.

pub hsm_client_certificate_identifier: Option, ///

Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.

pub hsm_configuration_identifier: Option, ///

Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.

Values: active, applying

pub status: Option, } #[allow(dead_code)] struct HsmStatusDeserializer; impl HsmStatusDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, HsmStatus, _>(tag_name, stack, |name, stack, obj| { match name { "HsmClientCertificateIdentifier" => { obj.hsm_client_certificate_identifier = Some(StringDeserializer::deserialize( "HsmClientCertificateIdentifier", stack, )?); } "HsmConfigurationIdentifier" => { obj.hsm_configuration_identifier = Some(StringDeserializer::deserialize( "HsmConfigurationIdentifier", stack, )?); } "Status" => { obj.status = Some(StringDeserializer::deserialize("Status", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

Describes an IP range used in a security group.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct IPRange { ///

The IP range in Classless Inter-Domain Routing (CIDR) notation.

pub cidrip: Option, ///

The status of the IP range, for example, "authorized".

pub status: Option, ///

The list of tags for the IP range.

pub tags: Option>, } #[allow(dead_code)] struct IPRangeDeserializer; impl IPRangeDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, IPRange, _>(tag_name, stack, |name, stack, obj| { match name { "CIDRIP" => { obj.cidrip = Some(StringDeserializer::deserialize("CIDRIP", stack)?); } "Status" => { obj.status = Some(StringDeserializer::deserialize("Status", stack)?); } "Tags" => { obj.tags .get_or_insert(vec![]) .extend(TagListDeserializer::deserialize("Tags", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct IPRangeListDeserializer; impl IPRangeListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "IPRange" { obj.push(IPRangeDeserializer::deserialize("IPRange", stack)?); } else { skip_tree(stack); } Ok(()) }) } } /// Serialize `IamRoleArnList` contents to a `SignedRequest`. 
struct IamRoleArnListSerializer; impl IamRoleArnListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } #[allow(dead_code)] struct ImportTablesCompletedDeserializer; impl ImportTablesCompletedDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "member" { obj.push(StringDeserializer::deserialize("member", stack)?); } else { skip_tree(stack); } Ok(()) }) } } #[allow(dead_code)] struct ImportTablesInProgressDeserializer; impl ImportTablesInProgressDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "member" { obj.push(StringDeserializer::deserialize("member", stack)?); } else { skip_tree(stack); } Ok(()) }) } } #[allow(dead_code)] struct ImportTablesNotStartedDeserializer; impl ImportTablesNotStartedDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "member" { obj.push(StringDeserializer::deserialize("member", stack)?); } else { skip_tree(stack); } Ok(()) }) } } #[allow(dead_code)] struct IntegerDeserializer; impl IntegerDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, |s| Ok(i64::from_str(&s).unwrap())) } } #[allow(dead_code)] struct IntegerOptionalDeserializer; impl IntegerOptionalDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { 
xml_util::deserialize_primitive(tag_name, stack, |s| Ok(i64::from_str(&s).unwrap())) } } ///

Describes the status of logging for a cluster.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct LoggingStatus { ///

The name of the S3 bucket where the log files are stored.

pub bucket_name: Option, ///

The message indicating that logs failed to be delivered.

pub last_failure_message: Option, ///

The last time when logs failed to be delivered.

pub last_failure_time: Option, ///

The last time that logs were delivered.

pub last_successful_delivery_time: Option, ///

true if logging is on, false if logging is off.

pub logging_enabled: Option, ///

The prefix applied to the log file names.

pub s3_key_prefix: Option, } #[allow(dead_code)] struct LoggingStatusDeserializer; impl LoggingStatusDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, LoggingStatus, _>(tag_name, stack, |name, stack, obj| { match name { "BucketName" => { obj.bucket_name = Some(StringDeserializer::deserialize("BucketName", stack)?); } "LastFailureMessage" => { obj.last_failure_message = Some(StringDeserializer::deserialize( "LastFailureMessage", stack, )?); } "LastFailureTime" => { obj.last_failure_time = Some(TStampDeserializer::deserialize("LastFailureTime", stack)?); } "LastSuccessfulDeliveryTime" => { obj.last_successful_delivery_time = Some(TStampDeserializer::deserialize( "LastSuccessfulDeliveryTime", stack, )?); } "LoggingEnabled" => { obj.logging_enabled = Some(BooleanDeserializer::deserialize("LoggingEnabled", stack)?); } "S3KeyPrefix" => { obj.s3_key_prefix = Some(StringDeserializer::deserialize("S3KeyPrefix", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct LongDeserializer; impl LongDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, |s| Ok(i64::from_str(&s).unwrap())) } } #[allow(dead_code)] struct LongOptionalDeserializer; impl LongOptionalDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, |s| Ok(i64::from_str(&s).unwrap())) } } ///

Defines a maintenance track that determines which Amazon Redshift version to apply during a maintenance window. If the value for MaintenanceTrack is current, the cluster is updated to the most recently certified maintenance release. If the value is trailing, the cluster is updated to the previously certified maintenance release.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct MaintenanceTrack { ///

The version number for the cluster release.

pub database_version: Option, ///

The name of the maintenance track. Possible values are current and trailing.

pub maintenance_track_name: Option, ///

An array of UpdateTarget objects to update with the maintenance track.

pub update_targets: Option>, } #[allow(dead_code)] struct MaintenanceTrackDeserializer; impl MaintenanceTrackDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, MaintenanceTrack, _>(tag_name, stack, |name, stack, obj| { match name { "DatabaseVersion" => { obj.database_version = Some(StringDeserializer::deserialize("DatabaseVersion", stack)?); } "MaintenanceTrackName" => { obj.maintenance_track_name = Some(StringDeserializer::deserialize( "MaintenanceTrackName", stack, )?); } "UpdateTargets" => { obj.update_targets.get_or_insert(vec![]).extend( EligibleTracksToUpdateListDeserializer::deserialize( "UpdateTargets", stack, )?, ); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct ModeDeserializer; impl ModeDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ModifyClusterDbRevisionMessage { ///

The unique identifier of a cluster whose database revision you want to modify.

Example: examplecluster

pub cluster_identifier: String, ///

The identifier of the database revision. You can retrieve this value from the response to the DescribeClusterDbRevisions request.

pub revision_target: String, } /// Serialize `ModifyClusterDbRevisionMessage` contents to a `SignedRequest`. struct ModifyClusterDbRevisionMessageSerializer; impl ModifyClusterDbRevisionMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyClusterDbRevisionMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); params.put( &format!("{}{}", prefix, "RevisionTarget"), &obj.revision_target, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ModifyClusterDbRevisionResult { pub cluster: Option, } #[allow(dead_code)] struct ModifyClusterDbRevisionResultDeserializer; impl ModifyClusterDbRevisionResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ModifyClusterDbRevisionResult, _>( tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ModifyClusterIamRolesMessage { ///

Zero or more IAM roles to associate with the cluster. The roles must be in their Amazon Resource Name (ARN) format. You can associate up to 10 IAM roles with a single cluster in a single request.

pub add_iam_roles: Option>, ///

The unique identifier of the cluster for which you want to associate or disassociate IAM roles.

pub cluster_identifier: String, ///

Zero or more IAM roles in ARN format to disassociate from the cluster. You can disassociate up to 10 IAM roles from a single cluster in a single request.

pub remove_iam_roles: Option>, } /// Serialize `ModifyClusterIamRolesMessage` contents to a `SignedRequest`. struct ModifyClusterIamRolesMessageSerializer; impl ModifyClusterIamRolesMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyClusterIamRolesMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.add_iam_roles { IamRoleArnListSerializer::serialize( params, &format!("{}{}", prefix, "IamRoleArn"), field_value, ); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); if let Some(ref field_value) = obj.remove_iam_roles { IamRoleArnListSerializer::serialize( params, &format!("{}{}", prefix, "IamRoleArn"), field_value, ); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ModifyClusterIamRolesResult { pub cluster: Option, } #[allow(dead_code)] struct ModifyClusterIamRolesResultDeserializer; impl ModifyClusterIamRolesResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ModifyClusterIamRolesResult, _>( tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ModifyClusterMaintenanceMessage { ///

A unique identifier for the cluster.

pub cluster_identifier: String, ///

A boolean indicating whether to enable the deferred maintenance window.

pub defer_maintenance: Option, ///

An integer indicating the duration of the maintenance window in days. If you specify a duration, you can't specify an end time. The duration must be 45 days or less.

pub defer_maintenance_duration: Option, ///

A timestamp indicating end time for the deferred maintenance window. If you specify an end time, you can't specify a duration.

pub defer_maintenance_end_time: Option, ///

A unique identifier for the deferred maintenance window.

pub defer_maintenance_identifier: Option, ///

A timestamp indicating the start time for the deferred maintenance window.

pub defer_maintenance_start_time: Option, } /// Serialize `ModifyClusterMaintenanceMessage` contents to a `SignedRequest`. struct ModifyClusterMaintenanceMessageSerializer; impl ModifyClusterMaintenanceMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyClusterMaintenanceMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); if let Some(ref field_value) = obj.defer_maintenance { params.put(&format!("{}{}", prefix, "DeferMaintenance"), &field_value); } if let Some(ref field_value) = obj.defer_maintenance_duration { params.put( &format!("{}{}", prefix, "DeferMaintenanceDuration"), &field_value, ); } if let Some(ref field_value) = obj.defer_maintenance_end_time { params.put( &format!("{}{}", prefix, "DeferMaintenanceEndTime"), &field_value, ); } if let Some(ref field_value) = obj.defer_maintenance_identifier { params.put( &format!("{}{}", prefix, "DeferMaintenanceIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.defer_maintenance_start_time { params.put( &format!("{}{}", prefix, "DeferMaintenanceStartTime"), &field_value, ); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ModifyClusterMaintenanceResult { pub cluster: Option, } #[allow(dead_code)] struct ModifyClusterMaintenanceResultDeserializer; impl ModifyClusterMaintenanceResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ModifyClusterMaintenanceResult, _>( tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ModifyClusterMessage { ///

If true, major version upgrades will be applied automatically to the cluster during the maintenance window.

Default: false

pub allow_version_upgrade: Option, ///

The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot.

If you decrease the automated snapshot retention period from its current value, existing automated snapshots that fall outside of the new retention period will be immediately deleted.

Default: Uses existing setting.

Constraints: Must be a value from 0 to 35.

pub automated_snapshot_retention_period: Option, ///

The unique identifier of the cluster to be modified.

Example: examplecluster

pub cluster_identifier: String, ///

The name of the cluster parameter group to apply to this cluster. This change is applied only after the cluster is rebooted. To reboot a cluster use RebootCluster.

Default: Uses existing setting.

Constraints: The cluster parameter group must be in the same parameter group family that matches the cluster version.

pub cluster_parameter_group_name: Option, ///

A list of cluster security groups to be authorized on this cluster. This change is asynchronously applied as soon as possible.

Security groups currently associated with the cluster, and not in the list of groups to apply, will be revoked from the cluster.

Constraints:

  • Must be 1 to 255 alphanumeric characters or hyphens

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

pub cluster_security_groups: Option>, ///

The new cluster type.

When you submit your cluster resize request, your existing cluster goes into a read-only mode. After Amazon Redshift provisions a new cluster based on your resize requirements, there will be outage for a period while the old cluster is deleted and your connection is switched to the new cluster. You can use DescribeResize to track the progress of the resize request.

Valid Values: multi-node | single-node

pub cluster_type: Option, ///

The new version number of the Amazon Redshift engine to upgrade to.

For major version upgrades, if a non-default cluster parameter group is currently in use, a new cluster parameter group in the cluster parameter group family for the new version must be specified. The new cluster parameter group can be the default for that cluster parameter group family. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

Example: 1.0

pub cluster_version: Option, ///

The Elastic IP (EIP) address for the cluster.

Constraints: The cluster must be provisioned in EC2-VPC and publicly-accessible through an Internet gateway. For more information about provisioning clusters in EC2-VPC, go to Supported Platforms to Launch Your Cluster in the Amazon Redshift Cluster Management Guide.

pub elastic_ip: Option, ///

Indicates whether the cluster is encrypted. If the value is encrypted (true) and you provide a value for the KmsKeyId parameter, we encrypt the cluster with the provided KmsKeyId. If you don't provide a KmsKeyId, we encrypt with the default key. In the China region we use legacy encryption if you specify that the cluster is encrypted.

If the value is not encrypted (false), then the cluster is decrypted.

pub encrypted: Option, ///

An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.

If this option is true, enhanced VPC routing is enabled.

Default: false

pub enhanced_vpc_routing: Option, ///

Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.

pub hsm_client_certificate_identifier: Option, ///

Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.

pub hsm_configuration_identifier: Option, ///

The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster.

pub kms_key_id: Option, ///

The name for the maintenance track that you want to assign for the cluster. This name change is asynchronous. The new track name stays in the PendingModifiedValues for the cluster until the next maintenance window. When the maintenance track changes, the cluster is switched to the latest cluster release available for the maintenance track. At this point, the maintenance track name is applied.

pub maintenance_track_name: Option, ///

The default for number of days that a newly created manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely. This value doesn't retroactively change the retention periods of existing manual snapshots.

The value must be either -1 or an integer between 1 and 3,653.

The default value is -1.

pub manual_snapshot_retention_period: Option, ///

The new password for the cluster master user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

Operations never return the password, so this operation provides a way to regain access to the master user account for a cluster if the password is lost.

Default: Uses existing setting.

Constraints:

  • Must be between 8 and 64 characters in length.

  • Must contain at least one uppercase letter.

  • Must contain at least one lowercase letter.

  • Must contain one number.

  • Can be any printable ASCII character (ASCII code 33 to 126) except ' (single quote), " (double quote), \, /, @, or space.

pub master_user_password: Option, ///

The new identifier for the cluster.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • Alphabetic characters must be lowercase.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for all clusters within an AWS account.

Example: examplecluster

pub new_cluster_identifier: Option, ///

The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter.

For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.4xlarge | ra3.16xlarge

pub node_type: Option, ///

The new number of nodes of the cluster. If you specify a new number of nodes, you must also specify the node type parameter.

For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

Valid Values: Integer greater than 0.

pub number_of_nodes: Option, ///

The weekly time range (in UTC) during which system maintenance can occur, if necessary. If system maintenance is necessary during the window, it may result in an outage.

This maintenance window change is made immediately. If the new maintenance window indicates the current time, there must be at least 120 minutes between the current time and end of the window in order to ensure that pending changes are applied.

Default: Uses existing setting.

Format: ddd:hh24:mi-ddd:hh24:mi, for example wed:07:30-wed:08:00.

Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

Constraints: Must be at least 30 minutes.

pub preferred_maintenance_window: Option, ///

If true, the cluster can be accessed from a public network. Only clusters in VPCs can be set to be publicly available.

pub publicly_accessible: Option, ///

A list of virtual private cloud (VPC) security groups to be associated with the cluster. This change is asynchronously applied as soon as possible.

pub vpc_security_group_ids: Option>, } /// Serialize `ModifyClusterMessage` contents to a `SignedRequest`. struct ModifyClusterMessageSerializer; impl ModifyClusterMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyClusterMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.allow_version_upgrade { params.put( &format!("{}{}", prefix, "AllowVersionUpgrade"), &field_value, ); } if let Some(ref field_value) = obj.automated_snapshot_retention_period { params.put( &format!("{}{}", prefix, "AutomatedSnapshotRetentionPeriod"), &field_value, ); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); if let Some(ref field_value) = obj.cluster_parameter_group_name { params.put( &format!("{}{}", prefix, "ClusterParameterGroupName"), &field_value, ); } if let Some(ref field_value) = obj.cluster_security_groups { ClusterSecurityGroupNameListSerializer::serialize( params, &format!("{}{}", prefix, "ClusterSecurityGroupName"), field_value, ); } if let Some(ref field_value) = obj.cluster_type { params.put(&format!("{}{}", prefix, "ClusterType"), &field_value); } if let Some(ref field_value) = obj.cluster_version { params.put(&format!("{}{}", prefix, "ClusterVersion"), &field_value); } if let Some(ref field_value) = obj.elastic_ip { params.put(&format!("{}{}", prefix, "ElasticIp"), &field_value); } if let Some(ref field_value) = obj.encrypted { params.put(&format!("{}{}", prefix, "Encrypted"), &field_value); } if let Some(ref field_value) = obj.enhanced_vpc_routing { params.put(&format!("{}{}", prefix, "EnhancedVpcRouting"), &field_value); } if let Some(ref field_value) = obj.hsm_client_certificate_identifier { params.put( &format!("{}{}", prefix, "HsmClientCertificateIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.hsm_configuration_identifier { params.put( &format!("{}{}", prefix, "HsmConfigurationIdentifier"), &field_value, ); } if 
let Some(ref field_value) = obj.kms_key_id { params.put(&format!("{}{}", prefix, "KmsKeyId"), &field_value); } if let Some(ref field_value) = obj.maintenance_track_name { params.put( &format!("{}{}", prefix, "MaintenanceTrackName"), &field_value, ); } if let Some(ref field_value) = obj.manual_snapshot_retention_period { params.put( &format!("{}{}", prefix, "ManualSnapshotRetentionPeriod"), &field_value, ); } if let Some(ref field_value) = obj.master_user_password { params.put(&format!("{}{}", prefix, "MasterUserPassword"), &field_value); } if let Some(ref field_value) = obj.new_cluster_identifier { params.put( &format!("{}{}", prefix, "NewClusterIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.node_type { params.put(&format!("{}{}", prefix, "NodeType"), &field_value); } if let Some(ref field_value) = obj.number_of_nodes { params.put(&format!("{}{}", prefix, "NumberOfNodes"), &field_value); } if let Some(ref field_value) = obj.preferred_maintenance_window { params.put( &format!("{}{}", prefix, "PreferredMaintenanceWindow"), &field_value, ); } if let Some(ref field_value) = obj.publicly_accessible { params.put(&format!("{}{}", prefix, "PubliclyAccessible"), &field_value); } if let Some(ref field_value) = obj.vpc_security_group_ids { VpcSecurityGroupIdListSerializer::serialize( params, &format!("{}{}", prefix, "VpcSecurityGroupId"), field_value, ); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ModifyClusterParameterGroupMessage { ///

The name of the parameter group to be modified.

pub parameter_group_name: String, ///

An array of parameters to be modified. A maximum of 20 parameters can be modified in a single request.

For each parameter to be modified, you must supply at least the parameter name and parameter value; other name-value pairs of the parameter are optional.

For the workload management (WLM) configuration, you must supply all the name-value pairs in the wlm_json_configuration parameter.

pub parameters: Vec, } /// Serialize `ModifyClusterParameterGroupMessage` contents to a `SignedRequest`. struct ModifyClusterParameterGroupMessageSerializer; impl ModifyClusterParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyClusterParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ParameterGroupName"), &obj.parameter_group_name, ); ParametersListSerializer::serialize( params, &format!("{}{}", prefix, "Parameter"), &obj.parameters, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ModifyClusterResult { pub cluster: Option, } #[allow(dead_code)] struct ModifyClusterResultDeserializer; impl ModifyClusterResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ModifyClusterResult, _>(tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ModifyClusterSnapshotMessage { ///

A Boolean option to override an exception if the retention period has already passed.

pub force: Option, ///

The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.

If the manual snapshot falls outside of the new retention period, you can specify the force option to immediately delete the snapshot.

The value must be either -1 or an integer between 1 and 3,653.

pub manual_snapshot_retention_period: Option, ///

The identifier of the snapshot whose setting you want to modify.

pub snapshot_identifier: String, } /// Serialize `ModifyClusterSnapshotMessage` contents to a `SignedRequest`. struct ModifyClusterSnapshotMessageSerializer; impl ModifyClusterSnapshotMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyClusterSnapshotMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.force { params.put(&format!("{}{}", prefix, "Force"), &field_value); } if let Some(ref field_value) = obj.manual_snapshot_retention_period { params.put( &format!("{}{}", prefix, "ManualSnapshotRetentionPeriod"), &field_value, ); } params.put( &format!("{}{}", prefix, "SnapshotIdentifier"), &obj.snapshot_identifier, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ModifyClusterSnapshotResult { pub snapshot: Option, } #[allow(dead_code)] struct ModifyClusterSnapshotResultDeserializer; impl ModifyClusterSnapshotResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ModifyClusterSnapshotResult, _>( tag_name, stack, |name, stack, obj| { match name { "Snapshot" => { obj.snapshot = Some(SnapshotDeserializer::deserialize("Snapshot", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ModifyClusterSnapshotScheduleMessage { ///

A unique identifier for the cluster whose snapshot schedule you want to modify.

pub cluster_identifier: String, ///

A boolean to indicate whether to remove the assoiciation between the cluster and the schedule.

pub disassociate_schedule: Option, ///

A unique alphanumeric identifier for the schedule that you want to associate with the cluster.

pub schedule_identifier: Option, } /// Serialize `ModifyClusterSnapshotScheduleMessage` contents to a `SignedRequest`. struct ModifyClusterSnapshotScheduleMessageSerializer; impl ModifyClusterSnapshotScheduleMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyClusterSnapshotScheduleMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); if let Some(ref field_value) = obj.disassociate_schedule { params.put( &format!("{}{}", prefix, "DisassociateSchedule"), &field_value, ); } if let Some(ref field_value) = obj.schedule_identifier { params.put(&format!("{}{}", prefix, "ScheduleIdentifier"), &field_value); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ModifyClusterSubnetGroupMessage { ///

The name of the subnet group to be modified.

pub cluster_subnet_group_name: String, ///

A text description of the subnet group to be modified.

pub description: Option, ///

An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a single request.

pub subnet_ids: Vec, } /// Serialize `ModifyClusterSubnetGroupMessage` contents to a `SignedRequest`. struct ModifyClusterSubnetGroupMessageSerializer; impl ModifyClusterSubnetGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyClusterSubnetGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterSubnetGroupName"), &obj.cluster_subnet_group_name, ); if let Some(ref field_value) = obj.description { params.put(&format!("{}{}", prefix, "Description"), &field_value); } SubnetIdentifierListSerializer::serialize( params, &format!("{}{}", prefix, "SubnetIdentifier"), &obj.subnet_ids, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ModifyClusterSubnetGroupResult { pub cluster_subnet_group: Option, } #[allow(dead_code)] struct ModifyClusterSubnetGroupResultDeserializer; impl ModifyClusterSubnetGroupResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ModifyClusterSubnetGroupResult, _>( tag_name, stack, |name, stack, obj| { match name { "ClusterSubnetGroup" => { obj.cluster_subnet_group = Some(ClusterSubnetGroupDeserializer::deserialize( "ClusterSubnetGroup", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ModifyEventSubscriptionMessage { ///

A Boolean value indicating if the subscription is enabled. true indicates the subscription is enabled

pub enabled: Option, ///

Specifies the Amazon Redshift event categories to be published by the event notification subscription.

Values: configuration, management, monitoring, security

pub event_categories: Option>, ///

Specifies the Amazon Redshift event severity to be published by the event notification subscription.

Values: ERROR, INFO

pub severity: Option, ///

The Amazon Resource Name (ARN) of the SNS topic to be used by the event notification subscription.

pub sns_topic_arn: Option, ///

A list of one or more identifiers of Amazon Redshift source objects. All of the objects must be of the same type as was specified in the source type parameter. The event subscription will return only events generated by the specified objects. If not specified, then events are returned for all objects within the source type specified.

Example: my-cluster-1, my-cluster-2

Example: my-snapshot-20131010

pub source_ids: Option>, ///

The type of source that will be generating the events. For example, if you want to be notified of events generated by a cluster, you would set this parameter to cluster. If this value is not specified, events are returned for all Amazon Redshift objects in your AWS account. You must specify a source type in order to specify source IDs.

Valid values: cluster, cluster-parameter-group, cluster-security-group, cluster-snapshot, and scheduled-action.

pub source_type: Option, ///

The name of the modified Amazon Redshift event notification subscription.

pub subscription_name: String, } /// Serialize `ModifyEventSubscriptionMessage` contents to a `SignedRequest`. struct ModifyEventSubscriptionMessageSerializer; impl ModifyEventSubscriptionMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyEventSubscriptionMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.enabled { params.put(&format!("{}{}", prefix, "Enabled"), &field_value); } if let Some(ref field_value) = obj.event_categories { EventCategoriesListSerializer::serialize( params, &format!("{}{}", prefix, "EventCategory"), field_value, ); } if let Some(ref field_value) = obj.severity { params.put(&format!("{}{}", prefix, "Severity"), &field_value); } if let Some(ref field_value) = obj.sns_topic_arn { params.put(&format!("{}{}", prefix, "SnsTopicArn"), &field_value); } if let Some(ref field_value) = obj.source_ids { SourceIdsListSerializer::serialize( params, &format!("{}{}", prefix, "SourceId"), field_value, ); } if let Some(ref field_value) = obj.source_type { params.put(&format!("{}{}", prefix, "SourceType"), &field_value); } params.put( &format!("{}{}", prefix, "SubscriptionName"), &obj.subscription_name, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ModifyEventSubscriptionResult { pub event_subscription: Option, } #[allow(dead_code)] struct ModifyEventSubscriptionResultDeserializer; impl ModifyEventSubscriptionResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ModifyEventSubscriptionResult, _>( tag_name, stack, |name, stack, obj| { match name { "EventSubscription" => { obj.event_subscription = Some(EventSubscriptionDeserializer::deserialize( "EventSubscription", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = 
"deserialize_structs", derive(Deserialize))] pub struct ModifyScheduledActionMessage { ///

A modified enable flag of the scheduled action. If true, the scheduled action is active. If false, the scheduled action is disabled.

pub enable: Option, ///

A modified end time of the scheduled action. For more information about this parameter, see ScheduledAction.

pub end_time: Option, ///

A different IAM role to assume to run the target action. For more information about this parameter, see ScheduledAction.

pub iam_role: Option, ///

A modified schedule in either at( ) or cron( ) format. For more information about this parameter, see ScheduledAction.

pub schedule: Option, ///

A modified description of the scheduled action.

pub scheduled_action_description: Option, ///

The name of the scheduled action to modify.

pub scheduled_action_name: String, ///

A modified start time of the scheduled action. For more information about this parameter, see ScheduledAction.

pub start_time: Option, ///

A modified JSON format of the scheduled action. For more information about this parameter, see ScheduledAction.

pub target_action: Option, } /// Serialize `ModifyScheduledActionMessage` contents to a `SignedRequest`. struct ModifyScheduledActionMessageSerializer; impl ModifyScheduledActionMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyScheduledActionMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.enable { params.put(&format!("{}{}", prefix, "Enable"), &field_value); } if let Some(ref field_value) = obj.end_time { params.put(&format!("{}{}", prefix, "EndTime"), &field_value); } if let Some(ref field_value) = obj.iam_role { params.put(&format!("{}{}", prefix, "IamRole"), &field_value); } if let Some(ref field_value) = obj.schedule { params.put(&format!("{}{}", prefix, "Schedule"), &field_value); } if let Some(ref field_value) = obj.scheduled_action_description { params.put( &format!("{}{}", prefix, "ScheduledActionDescription"), &field_value, ); } params.put( &format!("{}{}", prefix, "ScheduledActionName"), &obj.scheduled_action_name, ); if let Some(ref field_value) = obj.start_time { params.put(&format!("{}{}", prefix, "StartTime"), &field_value); } if let Some(ref field_value) = obj.target_action { ScheduledActionTypeSerializer::serialize( params, &format!("{}{}", prefix, "TargetAction"), field_value, ); } } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ModifySnapshotCopyRetentionPeriodMessage { ///

The unique identifier of the cluster for which you want to change the retention period for either automated or manual snapshots that are copied to a destination AWS Region.

Constraints: Must be the valid name of an existing cluster that has cross-region snapshot copy enabled.

pub cluster_identifier: String, ///

Indicates whether to apply the snapshot retention period to newly copied manual snapshots instead of automated snapshots.

pub manual: Option, ///

The number of days to retain automated snapshots in the destination AWS Region after they are copied from the source AWS Region.

By default, this only changes the retention period of copied automated snapshots.

If you decrease the retention period for automated snapshots that are copied to a destination AWS Region, Amazon Redshift deletes any existing automated snapshots that were copied to the destination AWS Region and that fall outside of the new retention period.

Constraints: Must be at least 1 and no more than 35 for automated snapshots.

If you specify the manual option, only newly copied manual snapshots will have the new retention period.

If you specify the value of -1 newly copied manual snapshots are retained indefinitely.

Constraints: The number of days must be either -1 or an integer between 1 and 3,653 for manual snapshots.

pub retention_period: i64, } /// Serialize `ModifySnapshotCopyRetentionPeriodMessage` contents to a `SignedRequest`. struct ModifySnapshotCopyRetentionPeriodMessageSerializer; impl ModifySnapshotCopyRetentionPeriodMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifySnapshotCopyRetentionPeriodMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); if let Some(ref field_value) = obj.manual { params.put(&format!("{}{}", prefix, "Manual"), &field_value); } params.put( &format!("{}{}", prefix, "RetentionPeriod"), &obj.retention_period, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ModifySnapshotCopyRetentionPeriodResult { pub cluster: Option, } #[allow(dead_code)] struct ModifySnapshotCopyRetentionPeriodResultDeserializer; impl ModifySnapshotCopyRetentionPeriodResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ModifySnapshotCopyRetentionPeriodResult, _>( tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ModifySnapshotScheduleMessage { ///

An updated list of schedule definitions. A schedule definition is made up of schedule expressions, for example, "cron(30 12 *)" or "rate(12 hours)".

pub schedule_definitions: Vec, ///

A unique alphanumeric identifier of the schedule to modify.

pub schedule_identifier: String, } /// Serialize `ModifySnapshotScheduleMessage` contents to a `SignedRequest`. struct ModifySnapshotScheduleMessageSerializer; impl ModifySnapshotScheduleMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifySnapshotScheduleMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } ScheduleDefinitionListSerializer::serialize( params, &format!("{}{}", prefix, "ScheduleDefinition"), &obj.schedule_definitions, ); params.put( &format!("{}{}", prefix, "ScheduleIdentifier"), &obj.schedule_identifier, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ModifyUsageLimitMessage { ///

The new limit amount. For more information about this parameter, see UsageLimit.

pub amount: Option, ///

The new action that Amazon Redshift takes when the limit is reached. For more information about this parameter, see UsageLimit.

pub breach_action: Option, ///

The identifier of the usage limit to modify.

pub usage_limit_id: String, } /// Serialize `ModifyUsageLimitMessage` contents to a `SignedRequest`. struct ModifyUsageLimitMessageSerializer; impl ModifyUsageLimitMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyUsageLimitMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.amount { params.put(&format!("{}{}", prefix, "Amount"), &field_value); } if let Some(ref field_value) = obj.breach_action { params.put(&format!("{}{}", prefix, "BreachAction"), &field_value); } params.put( &format!("{}{}", prefix, "UsageLimitId"), &obj.usage_limit_id, ); } } ///

A list of node configurations.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct NodeConfigurationOption { ///

The estimated disk utilizaton percentage.

pub estimated_disk_utilization_percent: Option, ///

The category of the node configuration recommendation.

pub mode: Option, ///

The node type, such as, "ds2.8xlarge".

pub node_type: Option, ///

The number of nodes.

pub number_of_nodes: Option, } #[allow(dead_code)] struct NodeConfigurationOptionDeserializer; impl NodeConfigurationOptionDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, NodeConfigurationOption, _>( tag_name, stack, |name, stack, obj| { match name { "EstimatedDiskUtilizationPercent" => { obj.estimated_disk_utilization_percent = Some(DoubleOptionalDeserializer::deserialize( "EstimatedDiskUtilizationPercent", stack, )?); } "Mode" => { obj.mode = Some(ModeDeserializer::deserialize("Mode", stack)?); } "NodeType" => { obj.node_type = Some(StringDeserializer::deserialize("NodeType", stack)?); } "NumberOfNodes" => { obj.number_of_nodes = Some(IntegerDeserializer::deserialize("NumberOfNodes", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[allow(dead_code)] struct NodeConfigurationOptionListDeserializer; impl NodeConfigurationOptionListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "NodeConfigurationOption" { obj.push(NodeConfigurationOptionDeserializer::deserialize( "NodeConfigurationOption", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

A set of elements to filter the returned node configurations.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct NodeConfigurationOptionsFilter { ///

The name of the element to filter.

pub name: Option, ///

The filter operator. If filter Name is NodeType only the 'in' operator is supported. Provide one value to evaluate for 'eq', 'lt', 'le', 'gt', and 'ge'. Provide two values to evaluate for 'between'. Provide a list of values for 'in'.

pub operator: Option, ///

List of values. Compare Name using Operator to Values. If filter Name is NumberOfNodes, then values can range from 0 to 200. If filter Name is EstimatedDiskUtilizationPercent, then values can range from 0 to 100. For example, filter NumberOfNodes (name) GT (operator) 3 (values).

pub values: Option>, } /// Serialize `NodeConfigurationOptionsFilter` contents to a `SignedRequest`. struct NodeConfigurationOptionsFilterSerializer; impl NodeConfigurationOptionsFilterSerializer { fn serialize(params: &mut Params, name: &str, obj: &NodeConfigurationOptionsFilter) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.name { params.put(&format!("{}{}", prefix, "Name"), &field_value); } if let Some(ref field_value) = obj.operator { params.put(&format!("{}{}", prefix, "Operator"), &field_value); } if let Some(ref field_value) = obj.values { ValueStringListSerializer::serialize( params, &format!("{}{}", prefix, "item"), field_value, ); } } } /// Serialize `NodeConfigurationOptionsFilterList` contents to a `SignedRequest`. struct NodeConfigurationOptionsFilterListSerializer; impl NodeConfigurationOptionsFilterListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); NodeConfigurationOptionsFilterSerializer::serialize(params, &key, obj); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct NodeConfigurationOptionsMessage { ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, ///

A list of valid node configurations.

pub node_configuration_option_list: Option>, } #[allow(dead_code)] struct NodeConfigurationOptionsMessageDeserializer; impl NodeConfigurationOptionsMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, NodeConfigurationOptionsMessage, _>( tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "NodeConfigurationOptionList" => { obj.node_configuration_option_list .get_or_insert(vec![]) .extend(NodeConfigurationOptionListDeserializer::deserialize( "NodeConfigurationOptionList", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

Describes an orderable cluster option.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct OrderableClusterOption { ///

A list of availability zones for the orderable cluster.

pub availability_zones: Option>, ///

The cluster type, for example multi-node.

pub cluster_type: Option, ///

The version of the orderable cluster.

pub cluster_version: Option, ///

The node type for the orderable cluster.

pub node_type: Option, } #[allow(dead_code)] struct OrderableClusterOptionDeserializer; impl OrderableClusterOptionDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, OrderableClusterOption, _>(tag_name, stack, |name, stack, obj| { match name { "AvailabilityZones" => { obj.availability_zones.get_or_insert(vec![]).extend( AvailabilityZoneListDeserializer::deserialize("AvailabilityZones", stack)?, ); } "ClusterType" => { obj.cluster_type = Some(StringDeserializer::deserialize("ClusterType", stack)?); } "ClusterVersion" => { obj.cluster_version = Some(StringDeserializer::deserialize("ClusterVersion", stack)?); } "NodeType" => { obj.node_type = Some(StringDeserializer::deserialize("NodeType", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct OrderableClusterOptionsListDeserializer; impl OrderableClusterOptionsListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "OrderableClusterOption" { obj.push(OrderableClusterOptionDeserializer::deserialize( "OrderableClusterOption", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

Contains the output from the DescribeOrderableClusterOptions action.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct OrderableClusterOptionsMessage { ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, ///

An OrderableClusterOption structure containing information about orderable options for the cluster.

pub orderable_cluster_options: Option>, } #[allow(dead_code)] struct OrderableClusterOptionsMessageDeserializer; impl OrderableClusterOptionsMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, OrderableClusterOptionsMessage, _>( tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "OrderableClusterOptions" => { obj.orderable_cluster_options.get_or_insert(vec![]).extend( OrderableClusterOptionsListDeserializer::deserialize( "OrderableClusterOptions", stack, )?, ); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

Describes a parameter in a cluster parameter group.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct Parameter { ///

The valid range of values for the parameter.

pub allowed_values: Option, ///

Specifies how to apply the WLM configuration parameter. Some properties can be applied dynamically, while other properties require that any associated clusters be rebooted for the configuration changes to be applied. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

pub apply_type: Option, ///

The data type of the parameter.

pub data_type: Option, ///

A description of the parameter.

pub description: Option, ///

If true, the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

pub is_modifiable: Option, ///

The earliest engine version to which the parameter can apply.

pub minimum_engine_version: Option, ///

The name of the parameter.

pub parameter_name: Option, ///

The value of the parameter.

pub parameter_value: Option, ///

The source of the parameter value, such as "engine-default" or "user".

pub source: Option, } #[allow(dead_code)] struct ParameterDeserializer; impl ParameterDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, Parameter, _>(tag_name, stack, |name, stack, obj| { match name { "AllowedValues" => { obj.allowed_values = Some(StringDeserializer::deserialize("AllowedValues", stack)?); } "ApplyType" => { obj.apply_type = Some(ParameterApplyTypeDeserializer::deserialize( "ApplyType", stack, )?); } "DataType" => { obj.data_type = Some(StringDeserializer::deserialize("DataType", stack)?); } "Description" => { obj.description = Some(StringDeserializer::deserialize("Description", stack)?); } "IsModifiable" => { obj.is_modifiable = Some(BooleanDeserializer::deserialize("IsModifiable", stack)?); } "MinimumEngineVersion" => { obj.minimum_engine_version = Some(StringDeserializer::deserialize( "MinimumEngineVersion", stack, )?); } "ParameterName" => { obj.parameter_name = Some(StringDeserializer::deserialize("ParameterName", stack)?); } "ParameterValue" => { obj.parameter_value = Some(StringDeserializer::deserialize("ParameterValue", stack)?); } "Source" => { obj.source = Some(StringDeserializer::deserialize("Source", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } /// Serialize `Parameter` contents to a `SignedRequest`. 
struct ParameterSerializer; impl ParameterSerializer { fn serialize(params: &mut Params, name: &str, obj: &Parameter) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.allowed_values { params.put(&format!("{}{}", prefix, "AllowedValues"), &field_value); } if let Some(ref field_value) = obj.apply_type { params.put(&format!("{}{}", prefix, "ApplyType"), &field_value); } if let Some(ref field_value) = obj.data_type { params.put(&format!("{}{}", prefix, "DataType"), &field_value); } if let Some(ref field_value) = obj.description { params.put(&format!("{}{}", prefix, "Description"), &field_value); } if let Some(ref field_value) = obj.is_modifiable { params.put(&format!("{}{}", prefix, "IsModifiable"), &field_value); } if let Some(ref field_value) = obj.minimum_engine_version { params.put( &format!("{}{}", prefix, "MinimumEngineVersion"), &field_value, ); } if let Some(ref field_value) = obj.parameter_name { params.put(&format!("{}{}", prefix, "ParameterName"), &field_value); } if let Some(ref field_value) = obj.parameter_value { params.put(&format!("{}{}", prefix, "ParameterValue"), &field_value); } if let Some(ref field_value) = obj.source { params.put(&format!("{}{}", prefix, "Source"), &field_value); } } } #[allow(dead_code)] struct ParameterApplyTypeDeserializer; impl ParameterApplyTypeDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } #[allow(dead_code)] struct ParameterGroupListDeserializer; impl ParameterGroupListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ClusterParameterGroup" { obj.push(ClusterParameterGroupDeserializer::deserialize( "ClusterParameterGroup", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } 
#[allow(dead_code)] struct ParametersListDeserializer; impl ParametersListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "Parameter" { obj.push(ParameterDeserializer::deserialize("Parameter", stack)?); } else { skip_tree(stack); } Ok(()) }) } } /// Serialize `ParametersList` contents to a `SignedRequest`. struct ParametersListSerializer; impl ParametersListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); ParameterSerializer::serialize(params, &key, obj); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct PauseClusterMessage { ///

The identifier of the cluster to be paused.

pub cluster_identifier: String, } #[allow(dead_code)] struct PauseClusterMessageDeserializer; impl PauseClusterMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, PauseClusterMessage, _>(tag_name, stack, |name, stack, obj| { match name { "ClusterIdentifier" => { obj.cluster_identifier = StringDeserializer::deserialize("ClusterIdentifier", stack)?; } _ => skip_tree(stack), } Ok(()) }) } } /// Serialize `PauseClusterMessage` contents to a `SignedRequest`. struct PauseClusterMessageSerializer; impl PauseClusterMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &PauseClusterMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct PauseClusterResult { pub cluster: Option, } #[allow(dead_code)] struct PauseClusterResultDeserializer; impl PauseClusterResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, PauseClusterResult, _>(tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct PendingActionsListDeserializer; impl PendingActionsListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "member" { obj.push(StringDeserializer::deserialize("member", stack)?); } else { skip_tree(stack); } Ok(()) }) } } ///

Describes cluster attributes that are in a pending state. A change to one or more the attributes was requested and is in progress or will be applied.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct PendingModifiedValues { ///

The pending or in-progress change of the automated snapshot retention period.

pub automated_snapshot_retention_period: Option, ///

The pending or in-progress change of the new identifier for the cluster.

pub cluster_identifier: Option, ///

The pending or in-progress change of the cluster type.

pub cluster_type: Option, ///

The pending or in-progress change of the service version.

pub cluster_version: Option, ///

The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.

pub encryption_type: Option, ///

An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.

If this option is true, enhanced VPC routing is enabled.

Default: false

pub enhanced_vpc_routing: Option, ///

The name of the maintenance track that the cluster will change to during the next maintenance window.

pub maintenance_track_name: Option, ///

The pending or in-progress change of the master user password for the cluster.

pub master_user_password: Option, ///

The pending or in-progress change of the cluster's node type.

pub node_type: Option, ///

The pending or in-progress change of the number of nodes in the cluster.

pub number_of_nodes: Option, ///

The pending or in-progress change of the ability to connect to the cluster from the public network.

pub publicly_accessible: Option, } #[allow(dead_code)] struct PendingModifiedValuesDeserializer; impl PendingModifiedValuesDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, PendingModifiedValues, _>(tag_name, stack, |name, stack, obj| { match name { "AutomatedSnapshotRetentionPeriod" => { obj.automated_snapshot_retention_period = Some(IntegerOptionalDeserializer::deserialize( "AutomatedSnapshotRetentionPeriod", stack, )?); } "ClusterIdentifier" => { obj.cluster_identifier = Some(StringDeserializer::deserialize("ClusterIdentifier", stack)?); } "ClusterType" => { obj.cluster_type = Some(StringDeserializer::deserialize("ClusterType", stack)?); } "ClusterVersion" => { obj.cluster_version = Some(StringDeserializer::deserialize("ClusterVersion", stack)?); } "EncryptionType" => { obj.encryption_type = Some(StringDeserializer::deserialize("EncryptionType", stack)?); } "EnhancedVpcRouting" => { obj.enhanced_vpc_routing = Some(BooleanOptionalDeserializer::deserialize( "EnhancedVpcRouting", stack, )?); } "MaintenanceTrackName" => { obj.maintenance_track_name = Some(StringDeserializer::deserialize( "MaintenanceTrackName", stack, )?); } "MasterUserPassword" => { obj.master_user_password = Some(StringDeserializer::deserialize( "MasterUserPassword", stack, )?); } "NodeType" => { obj.node_type = Some(StringDeserializer::deserialize("NodeType", stack)?); } "NumberOfNodes" => { obj.number_of_nodes = Some(IntegerOptionalDeserializer::deserialize( "NumberOfNodes", stack, )?); } "PubliclyAccessible" => { obj.publicly_accessible = Some(BooleanOptionalDeserializer::deserialize( "PubliclyAccessible", stack, )?); } _ => skip_tree(stack), } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct PurchaseReservedNodeOfferingMessage { ///

The number of reserved nodes that you want to purchase.

Default: 1

pub node_count: Option, ///

The unique identifier of the reserved node offering you want to purchase.

pub reserved_node_offering_id: String, } /// Serialize `PurchaseReservedNodeOfferingMessage` contents to a `SignedRequest`. struct PurchaseReservedNodeOfferingMessageSerializer; impl PurchaseReservedNodeOfferingMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &PurchaseReservedNodeOfferingMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.node_count { params.put(&format!("{}{}", prefix, "NodeCount"), &field_value); } params.put( &format!("{}{}", prefix, "ReservedNodeOfferingId"), &obj.reserved_node_offering_id, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct PurchaseReservedNodeOfferingResult { pub reserved_node: Option, } #[allow(dead_code)] struct PurchaseReservedNodeOfferingResultDeserializer; impl PurchaseReservedNodeOfferingResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, PurchaseReservedNodeOfferingResult, _>( tag_name, stack, |name, stack, obj| { match name { "ReservedNode" => { obj.reserved_node = Some(ReservedNodeDeserializer::deserialize( "ReservedNode", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct RebootClusterMessage { ///

The cluster identifier.

pub cluster_identifier: String, } /// Serialize `RebootClusterMessage` contents to a `SignedRequest`. struct RebootClusterMessageSerializer; impl RebootClusterMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &RebootClusterMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct RebootClusterResult { pub cluster: Option, } #[allow(dead_code)] struct RebootClusterResultDeserializer; impl RebootClusterResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, RebootClusterResult, _>(tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

Describes a recurring charge.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct RecurringCharge { ///

The amount charged per the period of time specified by the recurring charge frequency.

pub recurring_charge_amount: Option, ///

The frequency at which the recurring charge amount is applied.

pub recurring_charge_frequency: Option, } #[allow(dead_code)] struct RecurringChargeDeserializer; impl RecurringChargeDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, RecurringCharge, _>(tag_name, stack, |name, stack, obj| { match name { "RecurringChargeAmount" => { obj.recurring_charge_amount = Some(DoubleDeserializer::deserialize( "RecurringChargeAmount", stack, )?); } "RecurringChargeFrequency" => { obj.recurring_charge_frequency = Some(StringDeserializer::deserialize( "RecurringChargeFrequency", stack, )?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct RecurringChargeListDeserializer; impl RecurringChargeListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "RecurringCharge" { obj.push(RecurringChargeDeserializer::deserialize( "RecurringCharge", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

Describes a reserved node. You can call the DescribeReservedNodeOfferings API to obtain the available reserved node offerings.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ReservedNode { ///

The currency code for the reserved cluster.

pub currency_code: Option, ///

The duration of the node reservation in seconds.

pub duration: Option, ///

The fixed cost Amazon Redshift charges you for this reserved node.

pub fixed_price: Option, ///

The number of reserved compute nodes.

pub node_count: Option, ///

The node type of the reserved node.

pub node_type: Option, ///

The anticipated utilization of the reserved node, as defined in the reserved node offering.

pub offering_type: Option, ///

The recurring charges for the reserved node.

pub recurring_charges: Option>, ///

The unique identifier for the reservation.

pub reserved_node_id: Option, ///

The identifier for the reserved node offering.

pub reserved_node_offering_id: Option, ///

pub reserved_node_offering_type: Option, ///

The time the reservation started. You purchase a reserved node offering for a duration. This is the start time of that duration.

pub start_time: Option, ///

The state of the reserved compute node.

Possible Values:

  • pending-payment-This reserved node has recently been purchased, and the sale has been approved, but payment has not yet been confirmed.

  • active-This reserved node is owned by the caller and is available for use.

  • payment-failed-Payment failed for the purchase attempt.

  • retired-The reserved node is no longer available.

  • exchanging-The owner is exchanging the reserved node for another reserved node.

pub state: Option, ///

The hourly rate Amazon Redshift charges you for this reserved node.

pub usage_price: Option, } #[allow(dead_code)] struct ReservedNodeDeserializer; impl ReservedNodeDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ReservedNode, _>(tag_name, stack, |name, stack, obj| { match name { "CurrencyCode" => { obj.currency_code = Some(StringDeserializer::deserialize("CurrencyCode", stack)?); } "Duration" => { obj.duration = Some(IntegerDeserializer::deserialize("Duration", stack)?); } "FixedPrice" => { obj.fixed_price = Some(DoubleDeserializer::deserialize("FixedPrice", stack)?); } "NodeCount" => { obj.node_count = Some(IntegerDeserializer::deserialize("NodeCount", stack)?); } "NodeType" => { obj.node_type = Some(StringDeserializer::deserialize("NodeType", stack)?); } "OfferingType" => { obj.offering_type = Some(StringDeserializer::deserialize("OfferingType", stack)?); } "RecurringCharges" => { obj.recurring_charges.get_or_insert(vec![]).extend( RecurringChargeListDeserializer::deserialize("RecurringCharges", stack)?, ); } "ReservedNodeId" => { obj.reserved_node_id = Some(StringDeserializer::deserialize("ReservedNodeId", stack)?); } "ReservedNodeOfferingId" => { obj.reserved_node_offering_id = Some(StringDeserializer::deserialize( "ReservedNodeOfferingId", stack, )?); } "ReservedNodeOfferingType" => { obj.reserved_node_offering_type = Some(ReservedNodeOfferingTypeDeserializer::deserialize( "ReservedNodeOfferingType", stack, )?); } "StartTime" => { obj.start_time = Some(TStampDeserializer::deserialize("StartTime", stack)?); } "State" => { obj.state = Some(StringDeserializer::deserialize("State", stack)?); } "UsagePrice" => { obj.usage_price = Some(DoubleDeserializer::deserialize("UsagePrice", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct ReservedNodeListDeserializer; impl ReservedNodeListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { 
deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ReservedNode" { obj.push(ReservedNodeDeserializer::deserialize( "ReservedNode", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

Describes a reserved node offering.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ReservedNodeOffering { ///

The currency code for the compute nodes offering.

pub currency_code: Option, ///

The duration, in seconds, for which the offering will reserve the node.

pub duration: Option, ///

The upfront fixed charge you will pay to purchase the specific reserved node offering.

pub fixed_price: Option, ///

The node type offered by the reserved node offering.

pub node_type: Option, ///

The anticipated utilization of the reserved node, as defined in the reserved node offering.

pub offering_type: Option, ///

The charge to your account regardless of whether you are creating any clusters using the node offering. Recurring charges are only in effect for heavy-utilization reserved nodes.

pub recurring_charges: Option>, ///

The offering identifier.

pub reserved_node_offering_id: Option, ///

pub reserved_node_offering_type: Option, ///

The rate you are charged for each hour the cluster that is using the offering is running.

pub usage_price: Option, } #[allow(dead_code)] struct ReservedNodeOfferingDeserializer; impl ReservedNodeOfferingDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ReservedNodeOffering, _>(tag_name, stack, |name, stack, obj| { match name { "CurrencyCode" => { obj.currency_code = Some(StringDeserializer::deserialize("CurrencyCode", stack)?); } "Duration" => { obj.duration = Some(IntegerDeserializer::deserialize("Duration", stack)?); } "FixedPrice" => { obj.fixed_price = Some(DoubleDeserializer::deserialize("FixedPrice", stack)?); } "NodeType" => { obj.node_type = Some(StringDeserializer::deserialize("NodeType", stack)?); } "OfferingType" => { obj.offering_type = Some(StringDeserializer::deserialize("OfferingType", stack)?); } "RecurringCharges" => { obj.recurring_charges.get_or_insert(vec![]).extend( RecurringChargeListDeserializer::deserialize("RecurringCharges", stack)?, ); } "ReservedNodeOfferingId" => { obj.reserved_node_offering_id = Some(StringDeserializer::deserialize( "ReservedNodeOfferingId", stack, )?); } "ReservedNodeOfferingType" => { obj.reserved_node_offering_type = Some(ReservedNodeOfferingTypeDeserializer::deserialize( "ReservedNodeOfferingType", stack, )?); } "UsagePrice" => { obj.usage_price = Some(DoubleDeserializer::deserialize("UsagePrice", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct ReservedNodeOfferingListDeserializer; impl ReservedNodeOfferingListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ReservedNodeOffering" { obj.push(ReservedNodeOfferingDeserializer::deserialize( "ReservedNodeOffering", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } #[allow(dead_code)] struct ReservedNodeOfferingTypeDeserializer; impl ReservedNodeOfferingTypeDeserializer { 
#[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ReservedNodeOfferingsMessage { ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, ///

A list of ReservedNodeOffering objects.

pub reserved_node_offerings: Option>, } #[allow(dead_code)] struct ReservedNodeOfferingsMessageDeserializer; impl ReservedNodeOfferingsMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ReservedNodeOfferingsMessage, _>( tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "ReservedNodeOfferings" => { obj.reserved_node_offerings.get_or_insert(vec![]).extend( ReservedNodeOfferingListDeserializer::deserialize( "ReservedNodeOfferings", stack, )?, ); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ReservedNodesMessage { ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, ///

The list of ReservedNode objects.

pub reserved_nodes: Option>, } #[allow(dead_code)] struct ReservedNodesMessageDeserializer; impl ReservedNodesMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ReservedNodesMessage, _>(tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "ReservedNodes" => { obj.reserved_nodes.get_or_insert(vec![]).extend( ReservedNodeListDeserializer::deserialize("ReservedNodes", stack)?, ); } _ => skip_tree(stack), } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ResetClusterParameterGroupMessage { ///

The name of the cluster parameter group to be reset.

pub parameter_group_name: String, ///

An array of names of parameters to be reset. If ResetAllParameters option is not used, then at least one parameter name must be supplied.

Constraints: A maximum of 20 parameters can be reset in a single request.

pub parameters: Option>, ///

If true, all parameters in the specified parameter group will be reset to their default values.

Default: true

pub reset_all_parameters: Option, } /// Serialize `ResetClusterParameterGroupMessage` contents to a `SignedRequest`. struct ResetClusterParameterGroupMessageSerializer; impl ResetClusterParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ResetClusterParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ParameterGroupName"), &obj.parameter_group_name, ); if let Some(ref field_value) = obj.parameters { ParametersListSerializer::serialize( params, &format!("{}{}", prefix, "Parameter"), field_value, ); } if let Some(ref field_value) = obj.reset_all_parameters { params.put(&format!("{}{}", prefix, "ResetAllParameters"), &field_value); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ResizeClusterMessage { ///

A boolean value indicating whether the resize operation is using the classic resize process. If you don't provide this parameter or set the value to false, the resize type is elastic.

pub classic: Option, ///

The unique identifier for the cluster to resize.

pub cluster_identifier: String, ///

The new cluster type for the specified cluster.

pub cluster_type: Option, ///

The new node type for the nodes you are adding. If not specified, the cluster's current node type is used.

pub node_type: Option, ///

/// <p>The new number of nodes for the cluster. If not specified, the cluster's current number of nodes is used.</p>
pub number_of_nodes: Option<i64>,
}

#[allow(dead_code)]
struct ResizeClusterMessageDeserializer;
impl ResizeClusterMessageDeserializer {
    /// Deserialize a `ResizeClusterMessage` from the XML element named `tag_name`.
    #[allow(dead_code, unused_variables)]
    fn deserialize<T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ResizeClusterMessage, XmlParseError> {
        deserialize_elements::<_, ResizeClusterMessage, _>(tag_name, stack, |name, stack, obj| {
            match name {
                "Classic" => {
                    obj.classic = Some(BooleanOptionalDeserializer::deserialize("Classic", stack)?);
                }
                "ClusterIdentifier" => {
                    obj.cluster_identifier =
                        StringDeserializer::deserialize("ClusterIdentifier", stack)?;
                }
                "ClusterType" => {
                    obj.cluster_type = Some(StringDeserializer::deserialize("ClusterType", stack)?);
                }
                "NodeType" => {
                    obj.node_type = Some(StringDeserializer::deserialize("NodeType", stack)?);
                }
                "NumberOfNodes" => {
                    obj.number_of_nodes =
                        Some(IntegerDeserializer::deserialize("NumberOfNodes", stack)?);
                }
                // Unknown elements are skipped so new API fields don't break parsing.
                _ => skip_tree(stack),
            }
            Ok(())
        })
    }
}

/// Serialize `ResizeClusterMessage` contents to a `SignedRequest`.
struct ResizeClusterMessageSerializer;
impl ResizeClusterMessageSerializer {
    fn serialize(params: &mut Params, name: &str, obj: &ResizeClusterMessage) {
        let mut prefix = name.to_string();
        if prefix != "" {
            prefix.push_str(".");
        }
        if let Some(ref field_value) = obj.classic {
            params.put(&format!("{}{}", prefix, "Classic"), &field_value);
        }
        params.put(
            &format!("{}{}", prefix, "ClusterIdentifier"),
            &obj.cluster_identifier,
        );
        if let Some(ref field_value) = obj.cluster_type {
            params.put(&format!("{}{}", prefix, "ClusterType"), &field_value);
        }
        if let Some(ref field_value) = obj.node_type {
            params.put(&format!("{}{}", prefix, "NodeType"), &field_value);
        }
        if let Some(ref field_value) = obj.number_of_nodes {
            params.put(&format!("{}{}", prefix, "NumberOfNodes"), &field_value);
        }
    }
}

#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "serialize_structs", derive(Serialize))]
pub struct ResizeClusterResult {
    pub cluster: Option<Cluster>,
}

#[allow(dead_code)]
struct ResizeClusterResultDeserializer;
impl ResizeClusterResultDeserializer {
    /// Deserialize a `ResizeClusterResult` from the XML element named `tag_name`.
    #[allow(dead_code, unused_variables)]
    fn deserialize<T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ResizeClusterResult, XmlParseError> {
        deserialize_elements::<_, ResizeClusterResult, _>(tag_name, stack, |name, stack, obj| {
            match name {
                "Cluster" => {
                    obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?);
                }
                _ => skip_tree(stack),
            }
            Ok(())
        })
    }
}

Describes a resize operation.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ResizeInfo { ///

A boolean value indicating if the resize operation can be cancelled.

pub allow_cancel_resize: Option, ///

Returns the value ClassicResize.

pub resize_type: Option, } #[allow(dead_code)] struct ResizeInfoDeserializer; impl ResizeInfoDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ResizeInfo, _>(tag_name, stack, |name, stack, obj| { match name { "AllowCancelResize" => { obj.allow_cancel_resize = Some(BooleanDeserializer::deserialize( "AllowCancelResize", stack, )?); } "ResizeType" => { obj.resize_type = Some(StringDeserializer::deserialize("ResizeType", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

Describes the result of a cluster resize operation.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ResizeProgressMessage { ///

The average rate of the resize operation over the last few minutes, measured in megabytes per second. After the resize operation completes, this value shows the average rate of the entire resize operation.

pub avg_resize_rate_in_mega_bytes_per_second: Option, ///

The percent of data transferred from source cluster to target cluster.

pub data_transfer_progress_percent: Option, ///

The amount of seconds that have elapsed since the resize operation began. After the resize operation completes, this value shows the total actual time, in seconds, for the resize operation.

pub elapsed_time_in_seconds: Option, ///

The estimated time remaining, in seconds, until the resize operation is complete. This value is calculated based on the average resize rate and the estimated amount of data remaining to be processed. Once the resize operation is complete, this value will be 0.

pub estimated_time_to_completion_in_seconds: Option, ///

The names of tables that have been completely imported .

Valid Values: List of table names.

pub import_tables_completed: Option>, ///

The names of tables that are being currently imported.

Valid Values: List of table names.

pub import_tables_in_progress: Option>, ///

The names of tables that have not been yet imported.

Valid Values: List of table names

pub import_tables_not_started: Option>, ///

An optional string to provide additional details about the resize action.

pub message: Option, ///

While the resize operation is in progress, this value shows the current amount of data, in megabytes, that has been processed so far. When the resize operation is complete, this value shows the total amount of data, in megabytes, on the cluster, which may be more or less than TotalResizeDataInMegaBytes (the estimated total amount of data before resize).

pub progress_in_mega_bytes: Option, ///

An enum with possible values of ClassicResize and ElasticResize. These values describe the type of resize operation being performed.

pub resize_type: Option, ///

The status of the resize operation.

Valid Values: NONE | IN_PROGRESS | FAILED | SUCCEEDED | CANCELLING

pub status: Option, ///

The cluster type after the resize operation is complete.

Valid Values: multi-node | single-node

pub target_cluster_type: Option, ///

The type of encryption for the cluster after the resize is complete.

Possible values are KMS and None. In the China region possible values are: Legacy and None.

pub target_encryption_type: Option, ///

The node type that the cluster will have after the resize operation is complete.

pub target_node_type: Option, ///

The number of nodes that the cluster will have after the resize operation is complete.

pub target_number_of_nodes: Option, ///

The estimated total amount of data, in megabytes, on the cluster before the resize operation began.

pub total_resize_data_in_mega_bytes: Option, } #[allow(dead_code)] struct ResizeProgressMessageDeserializer; impl ResizeProgressMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ResizeProgressMessage, _>(tag_name, stack, |name, stack, obj| { match name { "AvgResizeRateInMegaBytesPerSecond" => { obj.avg_resize_rate_in_mega_bytes_per_second = Some(DoubleOptionalDeserializer::deserialize( "AvgResizeRateInMegaBytesPerSecond", stack, )?); } "DataTransferProgressPercent" => { obj.data_transfer_progress_percent = Some(DoubleOptionalDeserializer::deserialize( "DataTransferProgressPercent", stack, )?); } "ElapsedTimeInSeconds" => { obj.elapsed_time_in_seconds = Some(LongOptionalDeserializer::deserialize( "ElapsedTimeInSeconds", stack, )?); } "EstimatedTimeToCompletionInSeconds" => { obj.estimated_time_to_completion_in_seconds = Some(LongOptionalDeserializer::deserialize( "EstimatedTimeToCompletionInSeconds", stack, )?); } "ImportTablesCompleted" => { obj.import_tables_completed.get_or_insert(vec![]).extend( ImportTablesCompletedDeserializer::deserialize( "ImportTablesCompleted", stack, )?, ); } "ImportTablesInProgress" => { obj.import_tables_in_progress.get_or_insert(vec![]).extend( ImportTablesInProgressDeserializer::deserialize( "ImportTablesInProgress", stack, )?, ); } "ImportTablesNotStarted" => { obj.import_tables_not_started.get_or_insert(vec![]).extend( ImportTablesNotStartedDeserializer::deserialize( "ImportTablesNotStarted", stack, )?, ); } "Message" => { obj.message = Some(StringDeserializer::deserialize("Message", stack)?); } "ProgressInMegaBytes" => { obj.progress_in_mega_bytes = Some(LongOptionalDeserializer::deserialize( "ProgressInMegaBytes", stack, )?); } "ResizeType" => { obj.resize_type = Some(StringDeserializer::deserialize("ResizeType", stack)?); } "Status" => { obj.status = Some(StringDeserializer::deserialize("Status", stack)?); } "TargetClusterType" => { 
obj.target_cluster_type = Some(StringDeserializer::deserialize("TargetClusterType", stack)?); } "TargetEncryptionType" => { obj.target_encryption_type = Some(StringDeserializer::deserialize( "TargetEncryptionType", stack, )?); } "TargetNodeType" => { obj.target_node_type = Some(StringDeserializer::deserialize("TargetNodeType", stack)?); } "TargetNumberOfNodes" => { obj.target_number_of_nodes = Some(IntegerOptionalDeserializer::deserialize( "TargetNumberOfNodes", stack, )?); } "TotalResizeDataInMegaBytes" => { obj.total_resize_data_in_mega_bytes = Some( LongOptionalDeserializer::deserialize("TotalResizeDataInMegaBytes", stack)?, ); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct RestorableNodeTypeListDeserializer; impl RestorableNodeTypeListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "NodeType" { obj.push(StringDeserializer::deserialize("NodeType", stack)?); } else { skip_tree(stack); } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct RestoreFromClusterSnapshotMessage { ///

Reserved.

pub additional_info: Option, ///

If true, major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster.

Default: true

pub allow_version_upgrade: Option, ///

The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot.

Default: The value selected for the cluster from which the snapshot was taken.

Constraints: Must be a value from 0 to 35.

pub automated_snapshot_retention_period: Option, ///

The Amazon EC2 Availability Zone in which to restore the cluster.

Default: A random, system-chosen Availability Zone.

Example: us-east-2a

pub availability_zone: Option, ///

The identifier of the cluster that will be created from restoring the snapshot.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • Alphabetic characters must be lowercase.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

  • Must be unique for all clusters within an AWS account.

pub cluster_identifier: String, ///

The name of the parameter group to be associated with this cluster.

Default: The default Amazon Redshift cluster parameter group. For information about the default parameter group, go to Working with Amazon Redshift Parameter Groups.

Constraints:

  • Must be 1 to 255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

pub cluster_parameter_group_name: Option, ///

A list of security groups to be associated with this cluster.

Default: The default cluster security group for Amazon Redshift.

Cluster security groups only apply to clusters outside of VPCs.

pub cluster_security_groups: Option>, ///

The name of the subnet group where you want to cluster restored.

A snapshot of cluster in VPC can be restored only in VPC. Therefore, you must provide subnet group name where you want the cluster restored.

pub cluster_subnet_group_name: Option, ///

The elastic IP (EIP) address for the cluster.

pub elastic_ip: Option, ///

An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.

If this option is true, enhanced VPC routing is enabled.

Default: false

pub enhanced_vpc_routing: Option, ///

Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.

pub hsm_client_certificate_identifier: Option, ///

Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.

pub hsm_configuration_identifier: Option, ///

A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. You must supply the IAM roles in their Amazon Resource Name (ARN) format. You can supply up to 10 IAM roles in a single request.

A cluster can have up to 10 IAM roles associated at any time.

pub iam_roles: Option>, ///

The AWS Key Management Service (KMS) key ID of the encryption key that you want to use to encrypt data in the cluster that you restore from a shared snapshot.

pub kms_key_id: Option, ///

The name of the maintenance track for the restored cluster. When you take a snapshot, the snapshot inherits the MaintenanceTrack value from the cluster. The snapshot might be on a different track than the cluster that was the source for the snapshot. For example, suppose that you take a snapshot of a cluster that is on the current track and then change the cluster to be on the trailing track. In this case, the snapshot and the source cluster are on different tracks.

pub maintenance_track_name: Option, ///

The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn't change the retention period of existing snapshots.

The value must be either -1 or an integer between 1 and 3,653.

pub manual_snapshot_retention_period: Option, ///

The node type that the restored cluster will be provisioned with.

Default: The node type of the cluster from which the snapshot was taken. You can modify this if you are using any DS node type. In that case, you can choose to restore into another DS node type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or ds1.xlarge into ds2.xlarge. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc1.large instance type into another dc1.large instance type or dc2.large instance type. You can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlarge cluster, then resize to a dc2.8large cluster. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide.

pub node_type: Option, ///

The number of nodes specified when provisioning the restored cluster.

pub number_of_nodes: Option, ///

The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.

pub owner_account: Option, ///

The port number on which the cluster accepts connections.

Default: The same port as the original cluster.

Constraints: Must be between 1115 and 65535.

pub port: Option, ///

The weekly time range (in UTC) during which automated cluster maintenance can occur.

Format: ddd:hh24:mi-ddd:hh24:mi

Default: The value selected for the cluster from which the snapshot was taken. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide.

Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

Constraints: Minimum 30-minute window.

pub preferred_maintenance_window: Option, ///

If true, the cluster can be accessed from a public network.

pub publicly_accessible: Option, ///

The name of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

pub snapshot_cluster_identifier: Option, ///

The name of the snapshot from which to create the new cluster. This parameter isn't case sensitive.

Example: my-snapshot-id

pub snapshot_identifier: String, ///

A unique identifier for the snapshot schedule.

pub snapshot_schedule_identifier: Option, ///

A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster.

Default: The default VPC security group is associated with the cluster.

VPC security groups only apply to clusters in VPCs.

pub vpc_security_group_ids: Option>, } /// Serialize `RestoreFromClusterSnapshotMessage` contents to a `SignedRequest`. struct RestoreFromClusterSnapshotMessageSerializer; impl RestoreFromClusterSnapshotMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &RestoreFromClusterSnapshotMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.additional_info { params.put(&format!("{}{}", prefix, "AdditionalInfo"), &field_value); } if let Some(ref field_value) = obj.allow_version_upgrade { params.put( &format!("{}{}", prefix, "AllowVersionUpgrade"), &field_value, ); } if let Some(ref field_value) = obj.automated_snapshot_retention_period { params.put( &format!("{}{}", prefix, "AutomatedSnapshotRetentionPeriod"), &field_value, ); } if let Some(ref field_value) = obj.availability_zone { params.put(&format!("{}{}", prefix, "AvailabilityZone"), &field_value); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); if let Some(ref field_value) = obj.cluster_parameter_group_name { params.put( &format!("{}{}", prefix, "ClusterParameterGroupName"), &field_value, ); } if let Some(ref field_value) = obj.cluster_security_groups { ClusterSecurityGroupNameListSerializer::serialize( params, &format!("{}{}", prefix, "ClusterSecurityGroupName"), field_value, ); } if let Some(ref field_value) = obj.cluster_subnet_group_name { params.put( &format!("{}{}", prefix, "ClusterSubnetGroupName"), &field_value, ); } if let Some(ref field_value) = obj.elastic_ip { params.put(&format!("{}{}", prefix, "ElasticIp"), &field_value); } if let Some(ref field_value) = obj.enhanced_vpc_routing { params.put(&format!("{}{}", prefix, "EnhancedVpcRouting"), &field_value); } if let Some(ref field_value) = obj.hsm_client_certificate_identifier { params.put( &format!("{}{}", prefix, "HsmClientCertificateIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.hsm_configuration_identifier 
{ params.put( &format!("{}{}", prefix, "HsmConfigurationIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.iam_roles { IamRoleArnListSerializer::serialize( params, &format!("{}{}", prefix, "IamRoleArn"), field_value, ); } if let Some(ref field_value) = obj.kms_key_id { params.put(&format!("{}{}", prefix, "KmsKeyId"), &field_value); } if let Some(ref field_value) = obj.maintenance_track_name { params.put( &format!("{}{}", prefix, "MaintenanceTrackName"), &field_value, ); } if let Some(ref field_value) = obj.manual_snapshot_retention_period { params.put( &format!("{}{}", prefix, "ManualSnapshotRetentionPeriod"), &field_value, ); } if let Some(ref field_value) = obj.node_type { params.put(&format!("{}{}", prefix, "NodeType"), &field_value); } if let Some(ref field_value) = obj.number_of_nodes { params.put(&format!("{}{}", prefix, "NumberOfNodes"), &field_value); } if let Some(ref field_value) = obj.owner_account { params.put(&format!("{}{}", prefix, "OwnerAccount"), &field_value); } if let Some(ref field_value) = obj.port { params.put(&format!("{}{}", prefix, "Port"), &field_value); } if let Some(ref field_value) = obj.preferred_maintenance_window { params.put( &format!("{}{}", prefix, "PreferredMaintenanceWindow"), &field_value, ); } if let Some(ref field_value) = obj.publicly_accessible { params.put(&format!("{}{}", prefix, "PubliclyAccessible"), &field_value); } if let Some(ref field_value) = obj.snapshot_cluster_identifier { params.put( &format!("{}{}", prefix, "SnapshotClusterIdentifier"), &field_value, ); } params.put( &format!("{}{}", prefix, "SnapshotIdentifier"), &obj.snapshot_identifier, ); if let Some(ref field_value) = obj.snapshot_schedule_identifier { params.put( &format!("{}{}", prefix, "SnapshotScheduleIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.vpc_security_group_ids { VpcSecurityGroupIdListSerializer::serialize( params, &format!("{}{}", prefix, "VpcSecurityGroupId"), field_value, ); } } } #[derive(Clone, 
Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct RestoreFromClusterSnapshotResult { pub cluster: Option, } #[allow(dead_code)] struct RestoreFromClusterSnapshotResultDeserializer; impl RestoreFromClusterSnapshotResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, RestoreFromClusterSnapshotResult, _>( tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

Describes the status of a cluster restore action. Returns null if the cluster was not created by restoring a snapshot.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct RestoreStatus { ///

The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types.

pub current_restore_rate_in_mega_bytes_per_second: Option, ///

The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.

pub elapsed_time_in_seconds: Option, ///

The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.

pub estimated_time_to_completion_in_seconds: Option, ///

The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types.

pub progress_in_mega_bytes: Option, ///

The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.

pub snapshot_size_in_mega_bytes: Option, ///

The status of the restore action. Returns starting, restoring, completed, or failed.

pub status: Option, } #[allow(dead_code)] struct RestoreStatusDeserializer; impl RestoreStatusDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, RestoreStatus, _>(tag_name, stack, |name, stack, obj| { match name { "CurrentRestoreRateInMegaBytesPerSecond" => { obj.current_restore_rate_in_mega_bytes_per_second = Some(DoubleDeserializer::deserialize( "CurrentRestoreRateInMegaBytesPerSecond", stack, )?); } "ElapsedTimeInSeconds" => { obj.elapsed_time_in_seconds = Some(LongDeserializer::deserialize( "ElapsedTimeInSeconds", stack, )?); } "EstimatedTimeToCompletionInSeconds" => { obj.estimated_time_to_completion_in_seconds = Some( LongDeserializer::deserialize("EstimatedTimeToCompletionInSeconds", stack)?, ); } "ProgressInMegaBytes" => { obj.progress_in_mega_bytes = Some(LongDeserializer::deserialize("ProgressInMegaBytes", stack)?); } "SnapshotSizeInMegaBytes" => { obj.snapshot_size_in_mega_bytes = Some(LongDeserializer::deserialize( "SnapshotSizeInMegaBytes", stack, )?); } "Status" => { obj.status = Some(StringDeserializer::deserialize("Status", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct RestoreTableFromClusterSnapshotMessage { ///

The identifier of the Amazon Redshift cluster to restore the table to.

pub cluster_identifier: String, ///

The name of the table to create as a result of the current request.

pub new_table_name: String, ///

The identifier of the snapshot to restore the table from. This snapshot must have been created from the Amazon Redshift cluster specified by the ClusterIdentifier parameter.

pub snapshot_identifier: String, ///

The name of the source database that contains the table to restore from.

pub source_database_name: String, ///

The name of the source schema that contains the table to restore from. If you do not specify a SourceSchemaName value, the default is public.

pub source_schema_name: Option, ///

The name of the source table to restore from.

pub source_table_name: String, ///

The name of the database to restore the table to.

pub target_database_name: Option, ///

The name of the schema to restore the table to.

pub target_schema_name: Option, } /// Serialize `RestoreTableFromClusterSnapshotMessage` contents to a `SignedRequest`. struct RestoreTableFromClusterSnapshotMessageSerializer; impl RestoreTableFromClusterSnapshotMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &RestoreTableFromClusterSnapshotMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); params.put( &format!("{}{}", prefix, "NewTableName"), &obj.new_table_name, ); params.put( &format!("{}{}", prefix, "SnapshotIdentifier"), &obj.snapshot_identifier, ); params.put( &format!("{}{}", prefix, "SourceDatabaseName"), &obj.source_database_name, ); if let Some(ref field_value) = obj.source_schema_name { params.put(&format!("{}{}", prefix, "SourceSchemaName"), &field_value); } params.put( &format!("{}{}", prefix, "SourceTableName"), &obj.source_table_name, ); if let Some(ref field_value) = obj.target_database_name { params.put(&format!("{}{}", prefix, "TargetDatabaseName"), &field_value); } if let Some(ref field_value) = obj.target_schema_name { params.put(&format!("{}{}", prefix, "TargetSchemaName"), &field_value); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct RestoreTableFromClusterSnapshotResult { pub table_restore_status: Option, } #[allow(dead_code)] struct RestoreTableFromClusterSnapshotResultDeserializer; impl RestoreTableFromClusterSnapshotResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, RestoreTableFromClusterSnapshotResult, _>( tag_name, stack, |name, stack, obj| { match name { "TableRestoreStatus" => { obj.table_restore_status = Some(TableRestoreStatusDeserializer::deserialize( "TableRestoreStatus", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[derive(Clone, Debug, Default, 
PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ResumeClusterMessage { ///

The identifier of the cluster to be resumed.

pub cluster_identifier: String, } #[allow(dead_code)] struct ResumeClusterMessageDeserializer; impl ResumeClusterMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ResumeClusterMessage, _>(tag_name, stack, |name, stack, obj| { match name { "ClusterIdentifier" => { obj.cluster_identifier = StringDeserializer::deserialize("ClusterIdentifier", stack)?; } _ => skip_tree(stack), } Ok(()) }) } } /// Serialize `ResumeClusterMessage` contents to a `SignedRequest`. struct ResumeClusterMessageSerializer; impl ResumeClusterMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ResumeClusterMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ResumeClusterResult { pub cluster: Option, } #[allow(dead_code)] struct ResumeClusterResultDeserializer; impl ResumeClusterResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ResumeClusterResult, _>(tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

Describes a RevisionTarget.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct RevisionTarget { ///

A unique string that identifies the version to update the cluster to. You can use this value in ModifyClusterDbRevision.

pub database_revision: Option, ///

The date on which the database revision was released.

pub database_revision_release_date: Option, ///

A string that describes the changes and features that will be applied to the cluster when it is updated to the corresponding ClusterDbRevision.

pub description: Option, } #[allow(dead_code)] struct RevisionTargetDeserializer; impl RevisionTargetDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, RevisionTarget, _>(tag_name, stack, |name, stack, obj| { match name { "DatabaseRevision" => { obj.database_revision = Some(StringDeserializer::deserialize("DatabaseRevision", stack)?); } "DatabaseRevisionReleaseDate" => { obj.database_revision_release_date = Some(TStampDeserializer::deserialize( "DatabaseRevisionReleaseDate", stack, )?); } "Description" => { obj.description = Some(StringDeserializer::deserialize("Description", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct RevisionTargetsListDeserializer; impl RevisionTargetsListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "RevisionTarget" { obj.push(RevisionTargetDeserializer::deserialize( "RevisionTarget", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct RevokeClusterSecurityGroupIngressMessage { ///

The IP range for which to revoke access. This range must be a valid Classless Inter-Domain Routing (CIDR) block of IP addresses. If CIDRIP is specified, EC2SecurityGroupName and EC2SecurityGroupOwnerId cannot be provided.

pub cidrip: Option, ///

The name of the security Group from which to revoke the ingress rule.

pub cluster_security_group_name: String, ///

The name of the EC2 Security Group whose access is to be revoked. If EC2SecurityGroupName is specified, EC2SecurityGroupOwnerId must also be provided and CIDRIP cannot be provided.

pub ec2_security_group_name: Option, ///

The AWS account number of the owner of the security group specified in the EC2SecurityGroupName parameter. The AWS access key ID is not an acceptable value. If EC2SecurityGroupOwnerId is specified, EC2SecurityGroupName must also be provided. and CIDRIP cannot be provided.

Example: 111122223333

pub ec2_security_group_owner_id: Option, } /// Serialize `RevokeClusterSecurityGroupIngressMessage` contents to a `SignedRequest`. struct RevokeClusterSecurityGroupIngressMessageSerializer; impl RevokeClusterSecurityGroupIngressMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &RevokeClusterSecurityGroupIngressMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.cidrip { params.put(&format!("{}{}", prefix, "CIDRIP"), &field_value); } params.put( &format!("{}{}", prefix, "ClusterSecurityGroupName"), &obj.cluster_security_group_name, ); if let Some(ref field_value) = obj.ec2_security_group_name { params.put( &format!("{}{}", prefix, "EC2SecurityGroupName"), &field_value, ); } if let Some(ref field_value) = obj.ec2_security_group_owner_id { params.put( &format!("{}{}", prefix, "EC2SecurityGroupOwnerId"), &field_value, ); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct RevokeClusterSecurityGroupIngressResult { pub cluster_security_group: Option, } #[allow(dead_code)] struct RevokeClusterSecurityGroupIngressResultDeserializer; impl RevokeClusterSecurityGroupIngressResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, RevokeClusterSecurityGroupIngressResult, _>( tag_name, stack, |name, stack, obj| { match name { "ClusterSecurityGroup" => { obj.cluster_security_group = Some(ClusterSecurityGroupDeserializer::deserialize( "ClusterSecurityGroup", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct RevokeSnapshotAccessMessage { ///

The identifier of the AWS customer account that can no longer restore the specified snapshot.

pub account_with_restore_access: String, ///

The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

pub snapshot_cluster_identifier: Option, ///

The identifier of the snapshot that the account can no longer access.

pub snapshot_identifier: String, } /// Serialize `RevokeSnapshotAccessMessage` contents to a `SignedRequest`. struct RevokeSnapshotAccessMessageSerializer; impl RevokeSnapshotAccessMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &RevokeSnapshotAccessMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "AccountWithRestoreAccess"), &obj.account_with_restore_access, ); if let Some(ref field_value) = obj.snapshot_cluster_identifier { params.put( &format!("{}{}", prefix, "SnapshotClusterIdentifier"), &field_value, ); } params.put( &format!("{}{}", prefix, "SnapshotIdentifier"), &obj.snapshot_identifier, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct RevokeSnapshotAccessResult { pub snapshot: Option, } #[allow(dead_code)] struct RevokeSnapshotAccessResultDeserializer; impl RevokeSnapshotAccessResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, RevokeSnapshotAccessResult, _>( tag_name, stack, |name, stack, obj| { match name { "Snapshot" => { obj.snapshot = Some(SnapshotDeserializer::deserialize("Snapshot", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct RotateEncryptionKeyMessage { ///

The unique identifier of the cluster that you want to rotate the encryption keys for.

Constraints: Must be the name of valid cluster that has encryption enabled.

pub cluster_identifier: String, } /// Serialize `RotateEncryptionKeyMessage` contents to a `SignedRequest`. struct RotateEncryptionKeyMessageSerializer; impl RotateEncryptionKeyMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &RotateEncryptionKeyMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "ClusterIdentifier"), &obj.cluster_identifier, ); } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct RotateEncryptionKeyResult { pub cluster: Option, } #[allow(dead_code)] struct RotateEncryptionKeyResultDeserializer; impl RotateEncryptionKeyResultDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, RotateEncryptionKeyResult, _>( tag_name, stack, |name, stack, obj| { match name { "Cluster" => { obj.cluster = Some(ClusterDeserializer::deserialize("Cluster", stack)?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[allow(dead_code)] struct ScheduleDefinitionListDeserializer; impl ScheduleDefinitionListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ScheduleDefinition" { obj.push(StringDeserializer::deserialize( "ScheduleDefinition", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } /// Serialize `ScheduleDefinitionList` contents to a `SignedRequest`. 
struct ScheduleDefinitionListSerializer; impl ScheduleDefinitionListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } #[allow(dead_code)] struct ScheduleStateDeserializer; impl ScheduleStateDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } ///

Describes a scheduled action. You can use a scheduled action to trigger some Amazon Redshift API operations on a schedule. For information about which API operations can be scheduled, see ScheduledActionType.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ScheduledAction { ///

The end time in UTC when the schedule is no longer active. After this time, the scheduled action does not trigger.

pub end_time: Option, ///

The IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster Management Guide.

pub iam_role: Option, ///

List of times when the scheduled action will run.

pub next_invocations: Option>, ///

The schedule for a one-time (at format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour.

Format of at expressions is "at(yyyy-mm-ddThh:mm:ss)". For example, "at(2016-03-04T17:27:00)".

Format of cron expressions is "cron(Minutes Hours Day-of-month Month Day-of-week Year)". For example, "cron(0 10 ? * MON *)". For more information, see Cron Expressions in the Amazon CloudWatch Events User Guide.

pub schedule: Option, ///

The description of the scheduled action.

pub scheduled_action_description: Option, ///

The name of the scheduled action.

pub scheduled_action_name: Option, ///

The start time in UTC when the schedule is active. Before this time, the scheduled action does not trigger.

pub start_time: Option, ///

The state of the scheduled action. For example, DISABLED.

pub state: Option, ///

A JSON format string of the Amazon Redshift API operation with input parameters.

"{\"ResizeCluster\":{\"NodeType\":\"ds2.8xlarge\",\"ClusterIdentifier\":\"my-test-cluster\",\"NumberOfNodes\":3}}".

pub target_action: Option, } #[allow(dead_code)] struct ScheduledActionDeserializer; impl ScheduledActionDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ScheduledAction, _>(tag_name, stack, |name, stack, obj| { match name { "EndTime" => { obj.end_time = Some(TStampDeserializer::deserialize("EndTime", stack)?); } "IamRole" => { obj.iam_role = Some(StringDeserializer::deserialize("IamRole", stack)?); } "NextInvocations" => { obj.next_invocations.get_or_insert(vec![]).extend( ScheduledActionTimeListDeserializer::deserialize("NextInvocations", stack)?, ); } "Schedule" => { obj.schedule = Some(StringDeserializer::deserialize("Schedule", stack)?); } "ScheduledActionDescription" => { obj.scheduled_action_description = Some(StringDeserializer::deserialize( "ScheduledActionDescription", stack, )?); } "ScheduledActionName" => { obj.scheduled_action_name = Some(StringDeserializer::deserialize( "ScheduledActionName", stack, )?); } "StartTime" => { obj.start_time = Some(TStampDeserializer::deserialize("StartTime", stack)?); } "State" => { obj.state = Some(ScheduledActionStateDeserializer::deserialize( "State", stack, )?); } "TargetAction" => { obj.target_action = Some(ScheduledActionTypeDeserializer::deserialize( "TargetAction", stack, )?); } _ => skip_tree(stack), } Ok(()) }) } } ///

A set of elements to filter the returned scheduled actions.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ScheduledActionFilter { ///

The type of element to filter.

pub name: String, ///

List of values. Compare if the value (of type defined by Name) equals an item in the list of scheduled actions.

pub values: Vec, } /// Serialize `ScheduledActionFilter` contents to a `SignedRequest`. struct ScheduledActionFilterSerializer; impl ScheduledActionFilterSerializer { fn serialize(params: &mut Params, name: &str, obj: &ScheduledActionFilter) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put(&format!("{}{}", prefix, "Name"), &obj.name); ValueStringListSerializer::serialize(params, &format!("{}{}", prefix, "item"), &obj.values); } } /// Serialize `ScheduledActionFilterList` contents to a `SignedRequest`. struct ScheduledActionFilterListSerializer; impl ScheduledActionFilterListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); ScheduledActionFilterSerializer::serialize(params, &key, obj); } } } #[allow(dead_code)] struct ScheduledActionListDeserializer; impl ScheduledActionListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ScheduledAction" { obj.push(ScheduledActionDeserializer::deserialize( "ScheduledAction", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } #[allow(dead_code)] struct ScheduledActionStateDeserializer; impl ScheduledActionStateDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } #[allow(dead_code)] struct ScheduledActionTimeListDeserializer; impl ScheduledActionTimeListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "ScheduledActionTime" { obj.push(TStampDeserializer::deserialize( "ScheduledActionTime", stack, )?); } else { skip_tree(stack); } 
Ok(()) }) } } ///

The action type that specifies an Amazon Redshift API operation that is supported by the Amazon Redshift scheduler.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct ScheduledActionType { ///

An action that runs a PauseCluster API operation.

pub pause_cluster: Option, ///

An action that runs a ResizeCluster API operation.

pub resize_cluster: Option, ///

An action that runs a ResumeCluster API operation.

pub resume_cluster: Option, } #[allow(dead_code)] struct ScheduledActionTypeDeserializer; impl ScheduledActionTypeDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ScheduledActionType, _>(tag_name, stack, |name, stack, obj| { match name { "PauseCluster" => { obj.pause_cluster = Some(PauseClusterMessageDeserializer::deserialize( "PauseCluster", stack, )?); } "ResizeCluster" => { obj.resize_cluster = Some(ResizeClusterMessageDeserializer::deserialize( "ResizeCluster", stack, )?); } "ResumeCluster" => { obj.resume_cluster = Some(ResumeClusterMessageDeserializer::deserialize( "ResumeCluster", stack, )?); } _ => skip_tree(stack), } Ok(()) }) } } /// Serialize `ScheduledActionType` contents to a `SignedRequest`. struct ScheduledActionTypeSerializer; impl ScheduledActionTypeSerializer { fn serialize(params: &mut Params, name: &str, obj: &ScheduledActionType) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.pause_cluster { PauseClusterMessageSerializer::serialize( params, &format!("{}{}", prefix, "PauseCluster"), field_value, ); } if let Some(ref field_value) = obj.resize_cluster { ResizeClusterMessageSerializer::serialize( params, &format!("{}{}", prefix, "ResizeCluster"), field_value, ); } if let Some(ref field_value) = obj.resume_cluster { ResumeClusterMessageSerializer::serialize( params, &format!("{}{}", prefix, "ResumeCluster"), field_value, ); } } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct ScheduledActionsMessage { ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeScheduledActions request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, ///

List of retrieved scheduled actions.

pub scheduled_actions: Option>, } #[allow(dead_code)] struct ScheduledActionsMessageDeserializer; impl ScheduledActionsMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, ScheduledActionsMessage, _>( tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "ScheduledActions" => { obj.scheduled_actions.get_or_insert(vec![]).extend( ScheduledActionListDeserializer::deserialize( "ScheduledActions", stack, )?, ); } _ => skip_tree(stack), } Ok(()) }, ) } } #[allow(dead_code)] struct ScheduledSnapshotTimeListDeserializer; impl ScheduledSnapshotTimeListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "SnapshotTime" { obj.push(TStampDeserializer::deserialize("SnapshotTime", stack)?); } else { skip_tree(stack); } Ok(()) }) } } #[allow(dead_code)] struct SensitiveStringDeserializer; impl SensitiveStringDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } ///

Describes a snapshot.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct Snapshot { ///

A list of the AWS customer accounts authorized to restore the snapshot. Returns null if no accounts are authorized. Visible only to the snapshot owner.

pub accounts_with_restore_access: Option>, ///

The size of the incremental backup.

pub actual_incremental_backup_size_in_mega_bytes: Option, ///

The Availability Zone in which the cluster was created.

pub availability_zone: Option, ///

The number of megabytes that have been transferred to the snapshot backup.

pub backup_progress_in_mega_bytes: Option, ///

The time (UTC) when the cluster was originally created.

pub cluster_create_time: Option, ///

The identifier of the cluster for which the snapshot was taken.

pub cluster_identifier: Option, ///

The version ID of the Amazon Redshift engine that is running on the cluster.

pub cluster_version: Option, ///

The number of megabytes per second being transferred to the snapshot backup. Returns 0 for a completed backup.

pub current_backup_rate_in_mega_bytes_per_second: Option, ///

The name of the database that was created when the cluster was created.

pub db_name: Option, ///

The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish.

pub elapsed_time_in_seconds: Option, ///

If true, the data in the snapshot is encrypted at rest.

pub encrypted: Option, ///

A boolean that indicates whether the snapshot data is encrypted using the HSM keys of the source cluster. true indicates that the data is encrypted using HSM keys.

pub encrypted_with_hsm: Option, ///

An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide.

If this option is true, enhanced VPC routing is enabled.

Default: false

pub enhanced_vpc_routing: Option, ///

The estimate of the time remaining before the snapshot backup will complete. Returns 0 for a completed backup.

pub estimated_seconds_to_completion: Option, ///

The AWS Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken.

pub kms_key_id: Option, ///

The name of the maintenance track for the snapshot.

pub maintenance_track_name: Option, ///

The number of days until a manual snapshot will pass its retention period.

pub manual_snapshot_remaining_days: Option, ///

The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.

The value must be either -1 or an integer between 1 and 3,653.

pub manual_snapshot_retention_period: Option, ///

The master user name for the cluster.

pub master_username: Option, ///

The node type of the nodes in the cluster.

pub node_type: Option, ///

The number of nodes in the cluster.

pub number_of_nodes: Option, ///

For manual snapshots, the AWS customer account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot.

pub owner_account: Option, ///

The port that the cluster is listening on.

pub port: Option, ///

The list of node types that this cluster snapshot is able to restore into.

pub restorable_node_types: Option>, ///

The time (in UTC format) when Amazon Redshift began the snapshot. A snapshot contains a copy of the cluster data as of this exact time.

pub snapshot_create_time: Option, ///

The snapshot identifier that is provided in the request.

pub snapshot_identifier: Option, ///

A timestamp representing the start of the retention period for the snapshot.

pub snapshot_retention_start_time: Option, ///

The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot are of type "manual".

pub snapshot_type: Option, ///

The source region from which the snapshot was copied.

pub source_region: Option, ///

The snapshot status. The value of the status depends on the API operation used:

pub status: Option, ///

The list of tags for the cluster snapshot.

pub tags: Option>, ///

The size of the complete set of backup data that would be used to restore the cluster.

pub total_backup_size_in_mega_bytes: Option, ///

The VPC identifier of the cluster if the snapshot is from a cluster in a VPC. Otherwise, this field is not in the output.

pub vpc_id: Option, } #[allow(dead_code)] struct SnapshotDeserializer; impl SnapshotDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, Snapshot, _>(tag_name, stack, |name, stack, obj| { match name { "AccountsWithRestoreAccess" => { obj.accounts_with_restore_access .get_or_insert(vec![]) .extend(AccountsWithRestoreAccessListDeserializer::deserialize( "AccountsWithRestoreAccess", stack, )?); } "ActualIncrementalBackupSizeInMegaBytes" => { obj.actual_incremental_backup_size_in_mega_bytes = Some(DoubleDeserializer::deserialize( "ActualIncrementalBackupSizeInMegaBytes", stack, )?); } "AvailabilityZone" => { obj.availability_zone = Some(StringDeserializer::deserialize("AvailabilityZone", stack)?); } "BackupProgressInMegaBytes" => { obj.backup_progress_in_mega_bytes = Some(DoubleDeserializer::deserialize( "BackupProgressInMegaBytes", stack, )?); } "ClusterCreateTime" => { obj.cluster_create_time = Some(TStampDeserializer::deserialize("ClusterCreateTime", stack)?); } "ClusterIdentifier" => { obj.cluster_identifier = Some(StringDeserializer::deserialize("ClusterIdentifier", stack)?); } "ClusterVersion" => { obj.cluster_version = Some(StringDeserializer::deserialize("ClusterVersion", stack)?); } "CurrentBackupRateInMegaBytesPerSecond" => { obj.current_backup_rate_in_mega_bytes_per_second = Some(DoubleDeserializer::deserialize( "CurrentBackupRateInMegaBytesPerSecond", stack, )?); } "DBName" => { obj.db_name = Some(StringDeserializer::deserialize("DBName", stack)?); } "ElapsedTimeInSeconds" => { obj.elapsed_time_in_seconds = Some(LongDeserializer::deserialize( "ElapsedTimeInSeconds", stack, )?); } "Encrypted" => { obj.encrypted = Some(BooleanDeserializer::deserialize("Encrypted", stack)?); } "EncryptedWithHSM" => { obj.encrypted_with_hsm = Some(BooleanDeserializer::deserialize("EncryptedWithHSM", stack)?); } "EnhancedVpcRouting" => { obj.enhanced_vpc_routing = 
Some(BooleanDeserializer::deserialize( "EnhancedVpcRouting", stack, )?); } "EstimatedSecondsToCompletion" => { obj.estimated_seconds_to_completion = Some(LongDeserializer::deserialize( "EstimatedSecondsToCompletion", stack, )?); } "KmsKeyId" => { obj.kms_key_id = Some(StringDeserializer::deserialize("KmsKeyId", stack)?); } "MaintenanceTrackName" => { obj.maintenance_track_name = Some(StringDeserializer::deserialize( "MaintenanceTrackName", stack, )?); } "ManualSnapshotRemainingDays" => { obj.manual_snapshot_remaining_days = Some(IntegerOptionalDeserializer::deserialize( "ManualSnapshotRemainingDays", stack, )?); } "ManualSnapshotRetentionPeriod" => { obj.manual_snapshot_retention_period = Some(IntegerOptionalDeserializer::deserialize( "ManualSnapshotRetentionPeriod", stack, )?); } "MasterUsername" => { obj.master_username = Some(StringDeserializer::deserialize("MasterUsername", stack)?); } "NodeType" => { obj.node_type = Some(StringDeserializer::deserialize("NodeType", stack)?); } "NumberOfNodes" => { obj.number_of_nodes = Some(IntegerDeserializer::deserialize("NumberOfNodes", stack)?); } "OwnerAccount" => { obj.owner_account = Some(StringDeserializer::deserialize("OwnerAccount", stack)?); } "Port" => { obj.port = Some(IntegerDeserializer::deserialize("Port", stack)?); } "RestorableNodeTypes" => { obj.restorable_node_types.get_or_insert(vec![]).extend( RestorableNodeTypeListDeserializer::deserialize( "RestorableNodeTypes", stack, )?, ); } "SnapshotCreateTime" => { obj.snapshot_create_time = Some(TStampDeserializer::deserialize( "SnapshotCreateTime", stack, )?); } "SnapshotIdentifier" => { obj.snapshot_identifier = Some(StringDeserializer::deserialize( "SnapshotIdentifier", stack, )?); } "SnapshotRetentionStartTime" => { obj.snapshot_retention_start_time = Some(TStampDeserializer::deserialize( "SnapshotRetentionStartTime", stack, )?); } "SnapshotType" => { obj.snapshot_type = Some(StringDeserializer::deserialize("SnapshotType", stack)?); } "SourceRegion" => { 
obj.source_region = Some(StringDeserializer::deserialize("SourceRegion", stack)?); } "Status" => { obj.status = Some(StringDeserializer::deserialize("Status", stack)?); } "Tags" => { obj.tags .get_or_insert(vec![]) .extend(TagListDeserializer::deserialize("Tags", stack)?); } "TotalBackupSizeInMegaBytes" => { obj.total_backup_size_in_mega_bytes = Some(DoubleDeserializer::deserialize( "TotalBackupSizeInMegaBytes", stack, )?); } "VpcId" => { obj.vpc_id = Some(StringDeserializer::deserialize("VpcId", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

The snapshot copy grant that grants Amazon Redshift permission to encrypt copied snapshots with the specified customer master key (CMK) from AWS KMS in the destination region.

For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct SnapshotCopyGrant { ///

The unique identifier of the customer master key (CMK) in AWS KMS to which Amazon Redshift is granted permission.

pub kms_key_id: Option, ///

The name of the snapshot copy grant.

pub snapshot_copy_grant_name: Option, ///

A list of tag instances.

pub tags: Option>, } #[allow(dead_code)] struct SnapshotCopyGrantDeserializer; impl SnapshotCopyGrantDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, SnapshotCopyGrant, _>(tag_name, stack, |name, stack, obj| { match name { "KmsKeyId" => { obj.kms_key_id = Some(StringDeserializer::deserialize("KmsKeyId", stack)?); } "SnapshotCopyGrantName" => { obj.snapshot_copy_grant_name = Some(StringDeserializer::deserialize( "SnapshotCopyGrantName", stack, )?); } "Tags" => { obj.tags .get_or_insert(vec![]) .extend(TagListDeserializer::deserialize("Tags", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct SnapshotCopyGrantListDeserializer; impl SnapshotCopyGrantListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "SnapshotCopyGrant" { obj.push(SnapshotCopyGrantDeserializer::deserialize( "SnapshotCopyGrant", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct SnapshotCopyGrantMessage { ///

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeSnapshotCopyGrant request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

Constraints: You can specify either the SnapshotCopyGrantName parameter or the Marker parameter, but not both.

pub marker: Option, ///

The list of SnapshotCopyGrant objects.

pub snapshot_copy_grants: Option>, } #[allow(dead_code)] struct SnapshotCopyGrantMessageDeserializer; impl SnapshotCopyGrantMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, SnapshotCopyGrantMessage, _>( tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "SnapshotCopyGrants" => { obj.snapshot_copy_grants.get_or_insert(vec![]).extend( SnapshotCopyGrantListDeserializer::deserialize( "SnapshotCopyGrants", stack, )?, ); } _ => skip_tree(stack), } Ok(()) }, ) } } ///

Describes the errors returned by a snapshot.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct SnapshotErrorMessage { ///

The failure code for the error.

pub failure_code: Option, ///

The text message describing the error.

pub failure_reason: Option, ///

A unique identifier for the cluster.

pub snapshot_cluster_identifier: Option, ///

A unique identifier for the snapshot returning the error.

pub snapshot_identifier: Option, } #[allow(dead_code)] struct SnapshotErrorMessageDeserializer; impl SnapshotErrorMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, SnapshotErrorMessage, _>(tag_name, stack, |name, stack, obj| { match name { "FailureCode" => { obj.failure_code = Some(StringDeserializer::deserialize("FailureCode", stack)?); } "FailureReason" => { obj.failure_reason = Some(StringDeserializer::deserialize("FailureReason", stack)?); } "SnapshotClusterIdentifier" => { obj.snapshot_cluster_identifier = Some(StringDeserializer::deserialize( "SnapshotClusterIdentifier", stack, )?); } "SnapshotIdentifier" => { obj.snapshot_identifier = Some(StringDeserializer::deserialize( "SnapshotIdentifier", stack, )?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct SnapshotIdentifierListDeserializer; impl SnapshotIdentifierListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "String" { obj.push(StringDeserializer::deserialize("String", stack)?); } else { skip_tree(stack); } Ok(()) }) } } /// Serialize `SnapshotIdentifierList` contents to a `SignedRequest`. 
struct SnapshotIdentifierListSerializer; impl SnapshotIdentifierListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } #[allow(dead_code)] struct SnapshotListDeserializer; impl SnapshotListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "Snapshot" { obj.push(SnapshotDeserializer::deserialize("Snapshot", stack)?); } else { skip_tree(stack); } Ok(()) }) } } ///

Contains the output from the DescribeClusterSnapshots action.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct SnapshotMessage { ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, ///

A list of Snapshot instances.

pub snapshots: Option>, } #[allow(dead_code)] struct SnapshotMessageDeserializer; impl SnapshotMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, SnapshotMessage, _>(tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "Snapshots" => { obj.snapshots .get_or_insert(vec![]) .extend(SnapshotListDeserializer::deserialize("Snapshots", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

Describes a snapshot schedule. You can set a regular interval for creating snapshots of a cluster. You can also schedule snapshots for specific dates.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct SnapshotSchedule { ///

The number of clusters associated with the schedule.

pub associated_cluster_count: Option, ///

A list of clusters associated with the schedule. A maximum of 100 clusters is returned.

pub associated_clusters: Option>, ///

pub next_invocations: Option>, ///

A list of ScheduleDefinitions.

pub schedule_definitions: Option>, ///

The description of the schedule.

pub schedule_description: Option, ///

A unique identifier for the schedule.

pub schedule_identifier: Option, ///

An optional set of tags describing the schedule.

pub tags: Option>, } #[allow(dead_code)] struct SnapshotScheduleDeserializer; impl SnapshotScheduleDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, SnapshotSchedule, _>(tag_name, stack, |name, stack, obj| { match name { "AssociatedClusterCount" => { obj.associated_cluster_count = Some(IntegerOptionalDeserializer::deserialize( "AssociatedClusterCount", stack, )?); } "AssociatedClusters" => { obj.associated_clusters.get_or_insert(vec![]).extend( AssociatedClusterListDeserializer::deserialize( "AssociatedClusters", stack, )?, ); } "NextInvocations" => { obj.next_invocations.get_or_insert(vec![]).extend( ScheduledSnapshotTimeListDeserializer::deserialize( "NextInvocations", stack, )?, ); } "ScheduleDefinitions" => { obj.schedule_definitions.get_or_insert(vec![]).extend( ScheduleDefinitionListDeserializer::deserialize( "ScheduleDefinitions", stack, )?, ); } "ScheduleDescription" => { obj.schedule_description = Some(StringDeserializer::deserialize( "ScheduleDescription", stack, )?); } "ScheduleIdentifier" => { obj.schedule_identifier = Some(StringDeserializer::deserialize( "ScheduleIdentifier", stack, )?); } "Tags" => { obj.tags .get_or_insert(vec![]) .extend(TagListDeserializer::deserialize("Tags", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct SnapshotScheduleListDeserializer; impl SnapshotScheduleListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "SnapshotSchedule" { obj.push(SnapshotScheduleDeserializer::deserialize( "SnapshotSchedule", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

Describes a sorting entity

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct SnapshotSortingEntity { ///

The category for sorting the snapshots.

pub attribute: String, ///

The order for listing the attributes.

pub sort_order: Option, } /// Serialize `SnapshotSortingEntity` contents to a `SignedRequest`. struct SnapshotSortingEntitySerializer; impl SnapshotSortingEntitySerializer { fn serialize(params: &mut Params, name: &str, obj: &SnapshotSortingEntity) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put(&format!("{}{}", prefix, "Attribute"), &obj.attribute); if let Some(ref field_value) = obj.sort_order { params.put(&format!("{}{}", prefix, "SortOrder"), &field_value); } } } /// Serialize `SnapshotSortingEntityList` contents to a `SignedRequest`. struct SnapshotSortingEntityListSerializer; impl SnapshotSortingEntityListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); SnapshotSortingEntitySerializer::serialize(params, &key, obj); } } } #[allow(dead_code)] struct SourceIdsListDeserializer; impl SourceIdsListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "SourceId" { obj.push(StringDeserializer::deserialize("SourceId", stack)?); } else { skip_tree(stack); } Ok(()) }) } } /// Serialize `SourceIdsList` contents to a `SignedRequest`. 
struct SourceIdsListSerializer; impl SourceIdsListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } #[allow(dead_code)] struct SourceTypeDeserializer; impl SourceTypeDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } #[allow(dead_code)] struct StringDeserializer; impl StringDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } ///

Describes a subnet.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct Subnet { ///

pub subnet_availability_zone: Option, ///

The identifier of the subnet.

pub subnet_identifier: Option, ///

The status of the subnet.

pub subnet_status: Option, } #[allow(dead_code)] struct SubnetDeserializer; impl SubnetDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { deserialize_elements::<_, Subnet, _>(tag_name, stack, |name, stack, obj| { match name { "SubnetAvailabilityZone" => { obj.subnet_availability_zone = Some(AvailabilityZoneDeserializer::deserialize( "SubnetAvailabilityZone", stack, )?); } "SubnetIdentifier" => { obj.subnet_identifier = Some(StringDeserializer::deserialize("SubnetIdentifier", stack)?); } "SubnetStatus" => { obj.subnet_status = Some(StringDeserializer::deserialize("SubnetStatus", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } /// Serialize `SubnetIdentifierList` contents to a `SignedRequest`. struct SubnetIdentifierListSerializer; impl SubnetIdentifierListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } #[allow(dead_code)] struct SubnetListDeserializer; impl SubnetListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "Subnet" { obj.push(SubnetDeserializer::deserialize("Subnet", stack)?); } else { skip_tree(stack); } Ok(()) }) } } ///

Describes the operations that are allowed on a maintenance track.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct SupportedOperation { ///

A list of the supported operations.

pub operation_name: Option, } #[allow(dead_code)] struct SupportedOperationDeserializer; impl SupportedOperationDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, SupportedOperation, _>(tag_name, stack, |name, stack, obj| { match name { "OperationName" => { obj.operation_name = Some(StringDeserializer::deserialize("OperationName", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct SupportedOperationListDeserializer; impl SupportedOperationListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "SupportedOperation" { obj.push(SupportedOperationDeserializer::deserialize( "SupportedOperation", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

A list of supported platforms for orderable clusters.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct SupportedPlatform { ///

pub name: Option, } #[allow(dead_code)] struct SupportedPlatformDeserializer; impl SupportedPlatformDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, SupportedPlatform, _>(tag_name, stack, |name, stack, obj| { match name { "Name" => { obj.name = Some(StringDeserializer::deserialize("Name", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct SupportedPlatformsListDeserializer; impl SupportedPlatformsListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "SupportedPlatform" { obj.push(SupportedPlatformDeserializer::deserialize( "SupportedPlatform", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } #[allow(dead_code)] struct TStampDeserializer; impl TStampDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } ///

Describes the status of a RestoreTableFromClusterSnapshot operation.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct TableRestoreStatus { ///

The identifier of the Amazon Redshift cluster that the table is being restored to.

pub cluster_identifier: Option, ///

A description of the status of the table restore request. Status values include SUCCEEDED, FAILED, CANCELED, PENDING, IN_PROGRESS.

pub message: Option, ///

The name of the table to create as a result of the table restore request.

pub new_table_name: Option, ///

The amount of data restored to the new table so far, in megabytes (MB).

pub progress_in_mega_bytes: Option, ///

The time that the table restore request was made, in Universal Coordinated Time (UTC).

pub request_time: Option, ///

The identifier of the snapshot that the table is being restored from.

pub snapshot_identifier: Option, ///

The name of the source database that contains the table being restored.

pub source_database_name: Option, ///

The name of the source schema that contains the table being restored.

pub source_schema_name: Option, ///

The name of the source table being restored.

pub source_table_name: Option, ///

A value that describes the current state of the table restore request.

Valid Values: SUCCEEDED, FAILED, CANCELED, PENDING, IN_PROGRESS

pub status: Option, ///

The unique identifier for the table restore request.

pub table_restore_request_id: Option, ///

The name of the database to restore the table to.

pub target_database_name: Option, ///

The name of the schema to restore the table to.

pub target_schema_name: Option, ///

The total amount of data to restore to the new table, in megabytes (MB).

pub total_data_in_mega_bytes: Option, } #[allow(dead_code)] struct TableRestoreStatusDeserializer; impl TableRestoreStatusDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, TableRestoreStatus, _>(tag_name, stack, |name, stack, obj| { match name { "ClusterIdentifier" => { obj.cluster_identifier = Some(StringDeserializer::deserialize("ClusterIdentifier", stack)?); } "Message" => { obj.message = Some(StringDeserializer::deserialize("Message", stack)?); } "NewTableName" => { obj.new_table_name = Some(StringDeserializer::deserialize("NewTableName", stack)?); } "ProgressInMegaBytes" => { obj.progress_in_mega_bytes = Some(LongOptionalDeserializer::deserialize( "ProgressInMegaBytes", stack, )?); } "RequestTime" => { obj.request_time = Some(TStampDeserializer::deserialize("RequestTime", stack)?); } "SnapshotIdentifier" => { obj.snapshot_identifier = Some(StringDeserializer::deserialize( "SnapshotIdentifier", stack, )?); } "SourceDatabaseName" => { obj.source_database_name = Some(StringDeserializer::deserialize( "SourceDatabaseName", stack, )?); } "SourceSchemaName" => { obj.source_schema_name = Some(StringDeserializer::deserialize("SourceSchemaName", stack)?); } "SourceTableName" => { obj.source_table_name = Some(StringDeserializer::deserialize("SourceTableName", stack)?); } "Status" => { obj.status = Some(TableRestoreStatusTypeDeserializer::deserialize( "Status", stack, )?); } "TableRestoreRequestId" => { obj.table_restore_request_id = Some(StringDeserializer::deserialize( "TableRestoreRequestId", stack, )?); } "TargetDatabaseName" => { obj.target_database_name = Some(StringDeserializer::deserialize( "TargetDatabaseName", stack, )?); } "TargetSchemaName" => { obj.target_schema_name = Some(StringDeserializer::deserialize("TargetSchemaName", stack)?); } "TotalDataInMegaBytes" => { obj.total_data_in_mega_bytes = Some(LongOptionalDeserializer::deserialize( "TotalDataInMegaBytes", stack, 
)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct TableRestoreStatusListDeserializer; impl TableRestoreStatusListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "TableRestoreStatus" { obj.push(TableRestoreStatusDeserializer::deserialize( "TableRestoreStatus", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct TableRestoreStatusMessage { ///

A pagination token that can be used in a subsequent DescribeTableRestoreStatus request.

pub marker: Option, ///

A list of status details for one or more table restore requests.

pub table_restore_status_details: Option>, } #[allow(dead_code)] struct TableRestoreStatusMessageDeserializer; impl TableRestoreStatusMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, TableRestoreStatusMessage, _>( tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "TableRestoreStatusDetails" => { obj.table_restore_status_details .get_or_insert(vec![]) .extend(TableRestoreStatusListDeserializer::deserialize( "TableRestoreStatusDetails", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[allow(dead_code)] struct TableRestoreStatusTypeDeserializer; impl TableRestoreStatusTypeDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } ///

A tag consisting of a name/value pair for a resource.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] pub struct Tag { ///

The key, or name, for the resource tag.

pub key: Option, ///

The value for the resource tag.

pub value: Option, } #[allow(dead_code)] struct TagDeserializer; impl TagDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { deserialize_elements::<_, Tag, _>(tag_name, stack, |name, stack, obj| { match name { "Key" => { obj.key = Some(StringDeserializer::deserialize("Key", stack)?); } "Value" => { obj.value = Some(StringDeserializer::deserialize("Value", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } /// Serialize `Tag` contents to a `SignedRequest`. struct TagSerializer; impl TagSerializer { fn serialize(params: &mut Params, name: &str, obj: &Tag) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.key { params.put(&format!("{}{}", prefix, "Key"), &field_value); } if let Some(ref field_value) = obj.value { params.put(&format!("{}{}", prefix, "Value"), &field_value); } } } /// Serialize `TagKeyList` contents to a `SignedRequest`. struct TagKeyListSerializer; impl TagKeyListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } #[allow(dead_code)] struct TagListDeserializer; impl TagListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "Tag" { obj.push(TagDeserializer::deserialize("Tag", stack)?); } else { skip_tree(stack); } Ok(()) }) } } /// Serialize `TagList` contents to a `SignedRequest`. struct TagListSerializer; impl TagListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); TagSerializer::serialize(params, &key, obj); } } } /// Serialize `TagValueList` contents to a `SignedRequest`. 
struct TagValueListSerializer; impl TagValueListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } ///

A tag and its associated resource.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct TaggedResource { ///

The Amazon Resource Name (ARN) with which the tag is associated, for example: arn:aws:redshift:us-east-2:123456789:cluster:t1.

pub resource_name: Option, ///

The type of resource with which the tag is associated. Valid resource types are:

  • Cluster

  • CIDR/IP

  • EC2 security group

  • Snapshot

  • Cluster security group

  • Subnet group

  • HSM connection

  • HSM certificate

  • Parameter group

For more information about Amazon Redshift resource types and constructing ARNs, go to Constructing an Amazon Redshift Amazon Resource Name (ARN) in the Amazon Redshift Cluster Management Guide.

pub resource_type: Option, ///

The tag for the resource.

pub tag: Option, } #[allow(dead_code)] struct TaggedResourceDeserializer; impl TaggedResourceDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, TaggedResource, _>(tag_name, stack, |name, stack, obj| { match name { "ResourceName" => { obj.resource_name = Some(StringDeserializer::deserialize("ResourceName", stack)?); } "ResourceType" => { obj.resource_type = Some(StringDeserializer::deserialize("ResourceType", stack)?); } "Tag" => { obj.tag = Some(TagDeserializer::deserialize("Tag", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct TaggedResourceListDeserializer; impl TaggedResourceListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "TaggedResource" { obj.push(TaggedResourceDeserializer::deserialize( "TaggedResource", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } ///

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct TaggedResourceListMessage { ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, ///

A list of tags with their associated resources.

pub tagged_resources: Option>, } #[allow(dead_code)] struct TaggedResourceListMessageDeserializer; impl TaggedResourceListMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, TaggedResourceListMessage, _>( tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "TaggedResources" => { obj.tagged_resources.get_or_insert(vec![]).extend( TaggedResourceListDeserializer::deserialize("TaggedResources", stack)?, ); } _ => skip_tree(stack), } Ok(()) }, ) } } #[allow(dead_code)] struct TrackListDeserializer; impl TrackListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "MaintenanceTrack" { obj.push(MaintenanceTrackDeserializer::deserialize( "MaintenanceTrack", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } #[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct TrackListMessage { ///

A list of maintenance tracks output by the DescribeClusterTracks operation.

pub maintenance_tracks: Option>, ///

The starting point to return a set of response tracklist records. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

pub marker: Option, } #[allow(dead_code)] struct TrackListMessageDeserializer; impl TrackListMessageDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, TrackListMessage, _>(tag_name, stack, |name, stack, obj| { match name { "MaintenanceTracks" => { obj.maintenance_tracks.get_or_insert(vec![]).extend( TrackListDeserializer::deserialize("MaintenanceTracks", stack)?, ); } "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } ///

A maintenance track that you can switch the current track to.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct UpdateTarget { ///

The cluster version for the new maintenance track.

pub database_version: Option, ///

The name of the new maintenance track.

pub maintenance_track_name: Option, ///

A list of operations supported by the maintenance track.

pub supported_operations: Option>, } #[allow(dead_code)] struct UpdateTargetDeserializer; impl UpdateTargetDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, UpdateTarget, _>(tag_name, stack, |name, stack, obj| { match name { "DatabaseVersion" => { obj.database_version = Some(StringDeserializer::deserialize("DatabaseVersion", stack)?); } "MaintenanceTrackName" => { obj.maintenance_track_name = Some(StringDeserializer::deserialize( "MaintenanceTrackName", stack, )?); } "SupportedOperations" => { obj.supported_operations.get_or_insert(vec![]).extend( SupportedOperationListDeserializer::deserialize( "SupportedOperations", stack, )?, ); } _ => skip_tree(stack), } Ok(()) }) } } ///

Describes a usage limit object for a cluster.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct UsageLimit { ///

The limit amount. If time-based, this amount is in minutes. If data-based, this amount is in terabytes (TB).

pub amount: Option, ///

The action that Amazon Redshift takes when the limit is reached. Possible values are:

  • log - To log an event in a system table. The default is log.

  • emit-metric - To emit CloudWatch metrics.

  • disable - To disable the feature until the next usage period begins.

pub breach_action: Option, ///

The identifier of the cluster with a usage limit.

pub cluster_identifier: Option, ///

The Amazon Redshift feature to which the limit applies.

pub feature_type: Option, ///

The type of limit. Depending on the feature type, this can be based on a time duration or data size.

pub limit_type: Option, ///

The time period that the amount applies to. A weekly period begins on Sunday. The default is monthly.

pub period: Option, ///

A list of tag instances.

pub tags: Option>, ///

The identifier of the usage limit.

pub usage_limit_id: Option, } #[allow(dead_code)] struct UsageLimitDeserializer; impl UsageLimitDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, UsageLimit, _>(tag_name, stack, |name, stack, obj| { match name { "Amount" => { obj.amount = Some(LongDeserializer::deserialize("Amount", stack)?); } "BreachAction" => { obj.breach_action = Some(UsageLimitBreachActionDeserializer::deserialize( "BreachAction", stack, )?); } "ClusterIdentifier" => { obj.cluster_identifier = Some(StringDeserializer::deserialize("ClusterIdentifier", stack)?); } "FeatureType" => { obj.feature_type = Some(UsageLimitFeatureTypeDeserializer::deserialize( "FeatureType", stack, )?); } "LimitType" => { obj.limit_type = Some(UsageLimitLimitTypeDeserializer::deserialize( "LimitType", stack, )?); } "Period" => { obj.period = Some(UsageLimitPeriodDeserializer::deserialize("Period", stack)?); } "Tags" => { obj.tags .get_or_insert(vec![]) .extend(TagListDeserializer::deserialize("Tags", stack)?); } "UsageLimitId" => { obj.usage_limit_id = Some(StringDeserializer::deserialize("UsageLimitId", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct UsageLimitBreachActionDeserializer; impl UsageLimitBreachActionDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } #[allow(dead_code)] struct UsageLimitFeatureTypeDeserializer; impl UsageLimitFeatureTypeDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } #[allow(dead_code)] struct UsageLimitLimitTypeDeserializer; impl UsageLimitLimitTypeDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } #[derive(Clone, Debug, 
Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct UsageLimitList { ///

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

pub marker: Option, ///

Contains the output from the DescribeUsageLimits action.

pub usage_limits: Option>, } #[allow(dead_code)] struct UsageLimitListDeserializer; impl UsageLimitListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, UsageLimitList, _>(tag_name, stack, |name, stack, obj| { match name { "Marker" => { obj.marker = Some(StringDeserializer::deserialize("Marker", stack)?); } "UsageLimits" => { obj.usage_limits .get_or_insert(vec![]) .extend(UsageLimitsDeserializer::deserialize("UsageLimits", stack)?); } _ => skip_tree(stack), } Ok(()) }) } } #[allow(dead_code)] struct UsageLimitPeriodDeserializer; impl UsageLimitPeriodDeserializer { #[allow(dead_code, unused_variables)] fn deserialize(tag_name: &str, stack: &mut T) -> Result { xml_util::deserialize_primitive(tag_name, stack, Ok) } } #[allow(dead_code)] struct UsageLimitsDeserializer; impl UsageLimitsDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "member" { obj.push(UsageLimitDeserializer::deserialize("member", stack)?); } else { skip_tree(stack); } Ok(()) }) } } /// Serialize `ValueStringList` contents to a `SignedRequest`. struct ValueStringListSerializer; impl ValueStringListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } /// Serialize `VpcSecurityGroupIdList` contents to a `SignedRequest`. struct VpcSecurityGroupIdListSerializer; impl VpcSecurityGroupIdListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } ///

Describes the members of a VPC security group.

#[derive(Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize_structs", derive(Serialize))] pub struct VpcSecurityGroupMembership { ///

The status of the VPC security group.

pub status: Option, ///

The identifier of the VPC security group.

pub vpc_security_group_id: Option, } #[allow(dead_code)] struct VpcSecurityGroupMembershipDeserializer; impl VpcSecurityGroupMembershipDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result { deserialize_elements::<_, VpcSecurityGroupMembership, _>( tag_name, stack, |name, stack, obj| { match name { "Status" => { obj.status = Some(StringDeserializer::deserialize("Status", stack)?); } "VpcSecurityGroupId" => { obj.vpc_security_group_id = Some(StringDeserializer::deserialize( "VpcSecurityGroupId", stack, )?); } _ => skip_tree(stack), } Ok(()) }, ) } } #[allow(dead_code)] struct VpcSecurityGroupMembershipListDeserializer; impl VpcSecurityGroupMembershipListDeserializer { #[allow(dead_code, unused_variables)] fn deserialize( tag_name: &str, stack: &mut T, ) -> Result, XmlParseError> { deserialize_elements::<_, Vec<_>, _>(tag_name, stack, |name, stack, obj| { if name == "VpcSecurityGroup" { obj.push(VpcSecurityGroupMembershipDeserializer::deserialize( "VpcSecurityGroup", stack, )?); } else { skip_tree(stack); } Ok(()) }) } } /// Errors returned by AcceptReservedNodeExchange #[derive(Debug, PartialEq)] pub enum AcceptReservedNodeExchangeError { ///

Your request cannot be completed because a dependent internal service is temporarily unavailable. Wait 30 to 60 seconds and try again.

DependentServiceUnavailableFault(String), ///

Indicates that the Reserved Node being exchanged is not in an active state.

InvalidReservedNodeStateFault(String), ///

User already has a reservation with the given identifier.

ReservedNodeAlreadyExistsFault(String), ///

Indicates that the reserved node has already been exchanged.

ReservedNodeAlreadyMigratedFault(String), ///

The specified reserved compute node not found.

ReservedNodeNotFoundFault(String), ///

Specified offering does not exist.

ReservedNodeOfferingNotFoundFault(String), ///

The requested operation isn't supported.

UnsupportedOperationFault(String), } impl AcceptReservedNodeExchangeError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "DependentServiceUnavailableFault" => { return RusotoError::Service( AcceptReservedNodeExchangeError::DependentServiceUnavailableFault( parsed_error.message, ), ) } "InvalidReservedNodeState" => { return RusotoError::Service( AcceptReservedNodeExchangeError::InvalidReservedNodeStateFault( parsed_error.message, ), ) } "ReservedNodeAlreadyExists" => { return RusotoError::Service( AcceptReservedNodeExchangeError::ReservedNodeAlreadyExistsFault( parsed_error.message, ), ) } "ReservedNodeAlreadyMigrated" => { return RusotoError::Service( AcceptReservedNodeExchangeError::ReservedNodeAlreadyMigratedFault( parsed_error.message, ), ) } "ReservedNodeNotFound" => { return RusotoError::Service( AcceptReservedNodeExchangeError::ReservedNodeNotFoundFault( parsed_error.message, ), ) } "ReservedNodeOfferingNotFound" => { return RusotoError::Service( AcceptReservedNodeExchangeError::ReservedNodeOfferingNotFoundFault( parsed_error.message, ), ) } "UnsupportedOperation" => { return RusotoError::Service( AcceptReservedNodeExchangeError::UnsupportedOperationFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for AcceptReservedNodeExchangeError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { AcceptReservedNodeExchangeError::DependentServiceUnavailableFault(ref cause) => { write!(f, "{}", cause) } 
AcceptReservedNodeExchangeError::InvalidReservedNodeStateFault(ref cause) => { write!(f, "{}", cause) } AcceptReservedNodeExchangeError::ReservedNodeAlreadyExistsFault(ref cause) => { write!(f, "{}", cause) } AcceptReservedNodeExchangeError::ReservedNodeAlreadyMigratedFault(ref cause) => { write!(f, "{}", cause) } AcceptReservedNodeExchangeError::ReservedNodeNotFoundFault(ref cause) => { write!(f, "{}", cause) } AcceptReservedNodeExchangeError::ReservedNodeOfferingNotFoundFault(ref cause) => { write!(f, "{}", cause) } AcceptReservedNodeExchangeError::UnsupportedOperationFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for AcceptReservedNodeExchangeError {} /// Errors returned by AuthorizeClusterSecurityGroupIngress #[derive(Debug, PartialEq)] pub enum AuthorizeClusterSecurityGroupIngressError { ///

The specified CIDR block or EC2 security group is already authorized for the specified cluster security group.

AuthorizationAlreadyExistsFault(String), ///

The authorization quota for the cluster security group has been reached.

AuthorizationQuotaExceededFault(String), ///

The cluster security group name does not refer to an existing cluster security group.

ClusterSecurityGroupNotFoundFault(String), ///

The state of the cluster security group is not available.

InvalidClusterSecurityGroupStateFault(String), } impl AuthorizeClusterSecurityGroupIngressError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "AuthorizationAlreadyExists" => return RusotoError::Service(AuthorizeClusterSecurityGroupIngressError::AuthorizationAlreadyExistsFault(parsed_error.message)),"AuthorizationQuotaExceeded" => return RusotoError::Service(AuthorizeClusterSecurityGroupIngressError::AuthorizationQuotaExceededFault(parsed_error.message)),"ClusterSecurityGroupNotFound" => return RusotoError::Service(AuthorizeClusterSecurityGroupIngressError::ClusterSecurityGroupNotFoundFault(parsed_error.message)),"InvalidClusterSecurityGroupState" => return RusotoError::Service(AuthorizeClusterSecurityGroupIngressError::InvalidClusterSecurityGroupStateFault(parsed_error.message)),_ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for AuthorizeClusterSecurityGroupIngressError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { AuthorizeClusterSecurityGroupIngressError::AuthorizationAlreadyExistsFault( ref cause, ) => write!(f, "{}", cause), AuthorizeClusterSecurityGroupIngressError::AuthorizationQuotaExceededFault( ref cause, ) => write!(f, "{}", cause), AuthorizeClusterSecurityGroupIngressError::ClusterSecurityGroupNotFoundFault( ref cause, ) => write!(f, "{}", cause), AuthorizeClusterSecurityGroupIngressError::InvalidClusterSecurityGroupStateFault( ref cause, ) => write!(f, "{}", cause), } } } impl Error for AuthorizeClusterSecurityGroupIngressError {} /// Errors returned by 
AuthorizeSnapshotAccess #[derive(Debug, PartialEq)] pub enum AuthorizeSnapshotAccessError { ///

The specified CIDR block or EC2 security group is already authorized for the specified cluster security group.

AuthorizationAlreadyExistsFault(String), ///

The authorization quota for the cluster security group has been reached.

AuthorizationQuotaExceededFault(String), ///

The snapshot identifier does not refer to an existing cluster snapshot.

ClusterSnapshotNotFoundFault(String), ///

The request cannot be completed because a dependent service is throttling requests made by Amazon Redshift on your behalf. Wait and retry the request.

DependentServiceRequestThrottlingFault(String), ///

The specified cluster snapshot is not in the available state, or other accounts are authorized to access the snapshot.

InvalidClusterSnapshotStateFault(String), ///

The encryption key has exceeded its grant limit in AWS KMS.

LimitExceededFault(String), } impl AuthorizeSnapshotAccessError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "AuthorizationAlreadyExists" => { return RusotoError::Service( AuthorizeSnapshotAccessError::AuthorizationAlreadyExistsFault( parsed_error.message, ), ) } "AuthorizationQuotaExceeded" => { return RusotoError::Service( AuthorizeSnapshotAccessError::AuthorizationQuotaExceededFault( parsed_error.message, ), ) } "ClusterSnapshotNotFound" => { return RusotoError::Service( AuthorizeSnapshotAccessError::ClusterSnapshotNotFoundFault( parsed_error.message, ), ) } "DependentServiceRequestThrottlingFault" => { return RusotoError::Service( AuthorizeSnapshotAccessError::DependentServiceRequestThrottlingFault( parsed_error.message, ), ) } "InvalidClusterSnapshotState" => { return RusotoError::Service( AuthorizeSnapshotAccessError::InvalidClusterSnapshotStateFault( parsed_error.message, ), ) } "LimitExceededFault" => { return RusotoError::Service( AuthorizeSnapshotAccessError::LimitExceededFault(parsed_error.message), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for AuthorizeSnapshotAccessError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { AuthorizeSnapshotAccessError::AuthorizationAlreadyExistsFault(ref cause) => { write!(f, "{}", cause) } AuthorizeSnapshotAccessError::AuthorizationQuotaExceededFault(ref cause) => { write!(f, "{}", cause) } AuthorizeSnapshotAccessError::ClusterSnapshotNotFoundFault(ref cause) => { write!(f, "{}", cause) } 
AuthorizeSnapshotAccessError::DependentServiceRequestThrottlingFault(ref cause) => { write!(f, "{}", cause) } AuthorizeSnapshotAccessError::InvalidClusterSnapshotStateFault(ref cause) => { write!(f, "{}", cause) } AuthorizeSnapshotAccessError::LimitExceededFault(ref cause) => write!(f, "{}", cause), } } } impl Error for AuthorizeSnapshotAccessError {} /// Errors returned by BatchDeleteClusterSnapshots #[derive(Debug, PartialEq)] pub enum BatchDeleteClusterSnapshotsError { ///

The maximum number for a batch delete of snapshots has been reached. The limit is 100.

BatchDeleteRequestSizeExceededFault(String), } impl BatchDeleteClusterSnapshotsError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "BatchDeleteRequestSizeExceeded" => { return RusotoError::Service( BatchDeleteClusterSnapshotsError::BatchDeleteRequestSizeExceededFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for BatchDeleteClusterSnapshotsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { BatchDeleteClusterSnapshotsError::BatchDeleteRequestSizeExceededFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for BatchDeleteClusterSnapshotsError {} /// Errors returned by BatchModifyClusterSnapshots #[derive(Debug, PartialEq)] pub enum BatchModifyClusterSnapshotsError { ///

The maximum number for snapshot identifiers has been reached. The limit is 100.

BatchModifyClusterSnapshotsLimitExceededFault(String), ///

The retention period specified is either in the past or is not a valid value.

The value must be either -1 or an integer between 1 and 3,653.

InvalidRetentionPeriodFault(String), } impl BatchModifyClusterSnapshotsError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "BatchModifyClusterSnapshotsLimitExceededFault" => return RusotoError::Service(BatchModifyClusterSnapshotsError::BatchModifyClusterSnapshotsLimitExceededFault(parsed_error.message)),"InvalidRetentionPeriodFault" => return RusotoError::Service(BatchModifyClusterSnapshotsError::InvalidRetentionPeriodFault(parsed_error.message)),_ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for BatchModifyClusterSnapshotsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { BatchModifyClusterSnapshotsError::BatchModifyClusterSnapshotsLimitExceededFault( ref cause, ) => write!(f, "{}", cause), BatchModifyClusterSnapshotsError::InvalidRetentionPeriodFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for BatchModifyClusterSnapshotsError {} /// Errors returned by CancelResize #[derive(Debug, PartialEq)] pub enum CancelResizeError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The specified cluster is not in the available state.

InvalidClusterStateFault(String), ///

A resize operation for the specified cluster is not found.

ResizeNotFoundFault(String), ///

The requested operation isn't supported.

UnsupportedOperationFault(String), } impl CancelResizeError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service(CancelResizeError::ClusterNotFoundFault( parsed_error.message, )) } "InvalidClusterState" => { return RusotoError::Service(CancelResizeError::InvalidClusterStateFault( parsed_error.message, )) } "ResizeNotFound" => { return RusotoError::Service(CancelResizeError::ResizeNotFoundFault( parsed_error.message, )) } "UnsupportedOperation" => { return RusotoError::Service(CancelResizeError::UnsupportedOperationFault( parsed_error.message, )) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for CancelResizeError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { CancelResizeError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), CancelResizeError::InvalidClusterStateFault(ref cause) => write!(f, "{}", cause), CancelResizeError::ResizeNotFoundFault(ref cause) => write!(f, "{}", cause), CancelResizeError::UnsupportedOperationFault(ref cause) => write!(f, "{}", cause), } } } impl Error for CancelResizeError {} /// Errors returned by CopyClusterSnapshot #[derive(Debug, PartialEq)] pub enum CopyClusterSnapshotError { ///

The value specified as a snapshot identifier is already used by an existing snapshot.

ClusterSnapshotAlreadyExistsFault(String), ///

The snapshot identifier does not refer to an existing cluster snapshot.

ClusterSnapshotNotFoundFault(String), ///

The request would result in the user exceeding the allowed number of cluster snapshots.

ClusterSnapshotQuotaExceededFault(String), ///

The specified cluster snapshot is not in the available state, or other accounts are authorized to access the snapshot.

InvalidClusterSnapshotStateFault(String), ///

The retention period specified is either in the past or is not a valid value.

The value must be either -1 or an integer between 1 and 3,653.

InvalidRetentionPeriodFault(String), } impl CopyClusterSnapshotError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterSnapshotAlreadyExists" => { return RusotoError::Service( CopyClusterSnapshotError::ClusterSnapshotAlreadyExistsFault( parsed_error.message, ), ) } "ClusterSnapshotNotFound" => { return RusotoError::Service( CopyClusterSnapshotError::ClusterSnapshotNotFoundFault( parsed_error.message, ), ) } "ClusterSnapshotQuotaExceeded" => { return RusotoError::Service( CopyClusterSnapshotError::ClusterSnapshotQuotaExceededFault( parsed_error.message, ), ) } "InvalidClusterSnapshotState" => { return RusotoError::Service( CopyClusterSnapshotError::InvalidClusterSnapshotStateFault( parsed_error.message, ), ) } "InvalidRetentionPeriodFault" => { return RusotoError::Service( CopyClusterSnapshotError::InvalidRetentionPeriodFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for CopyClusterSnapshotError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { CopyClusterSnapshotError::ClusterSnapshotAlreadyExistsFault(ref cause) => { write!(f, "{}", cause) } CopyClusterSnapshotError::ClusterSnapshotNotFoundFault(ref cause) => { write!(f, "{}", cause) } CopyClusterSnapshotError::ClusterSnapshotQuotaExceededFault(ref cause) => { write!(f, "{}", cause) } CopyClusterSnapshotError::InvalidClusterSnapshotStateFault(ref cause) => { write!(f, "{}", cause) } CopyClusterSnapshotError::InvalidRetentionPeriodFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for 
CopyClusterSnapshotError {} /// Errors returned by CreateCluster #[derive(Debug, PartialEq)] pub enum CreateClusterError { ///

The account already has a cluster with the given identifier.

ClusterAlreadyExistsFault(String), ///

The parameter group name does not refer to an existing parameter group.

ClusterParameterGroupNotFoundFault(String), ///

The request would exceed the allowed number of cluster instances for this account. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

ClusterQuotaExceededFault(String), ///

The cluster security group name does not refer to an existing cluster security group.

ClusterSecurityGroupNotFoundFault(String), ///

The cluster subnet group name does not refer to an existing cluster subnet group.

ClusterSubnetGroupNotFoundFault(String), ///

The request cannot be completed because a dependent service is throttling requests made by Amazon Redshift on your behalf. Wait and retry the request.

DependentServiceRequestThrottlingFault(String), ///

There is no Amazon Redshift HSM client certificate with the specified identifier.

HsmClientCertificateNotFoundFault(String), ///

There is no Amazon Redshift HSM configuration with the specified identifier.

HsmConfigurationNotFoundFault(String), ///

The number of nodes specified exceeds the allotted capacity of the cluster.

InsufficientClusterCapacityFault(String), ///

The cluster subnet group cannot be deleted because it is in use.

InvalidClusterSubnetGroupStateFault(String), ///

The provided cluster track name is not valid.

InvalidClusterTrackFault(String), ///

The Elastic IP (EIP) is invalid or cannot be found.

InvalidElasticIpFault(String), ///

The retention period specified is either in the past or is not a valid value.

The value must be either -1 or an integer between 1 and 3,653.

InvalidRetentionPeriodFault(String), ///

The requested subnet is not valid, or not all of the subnets are in the same VPC.

InvalidSubnet(String), ///

The tag is invalid.

InvalidTagFault(String), ///

The cluster subnet group does not cover all Availability Zones.

InvalidVPCNetworkStateFault(String), ///

The encryption key has exceeded its grant limit in AWS KMS.

LimitExceededFault(String), ///

The operation would exceed the number of nodes allowed for a cluster.

NumberOfNodesPerClusterLimitExceededFault(String), ///

The operation would exceed the number of nodes allotted to the account. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

NumberOfNodesQuotaExceededFault(String), ///

We could not find the specified snapshot schedule.

SnapshotScheduleNotFoundFault(String), ///

You have exceeded the number of tags allowed.

TagLimitExceededFault(String), ///

Your account is not authorized to perform the requested operation.

UnauthorizedOperation(String), } impl CreateClusterError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterAlreadyExists" => { return RusotoError::Service(CreateClusterError::ClusterAlreadyExistsFault( parsed_error.message, )) } "ClusterParameterGroupNotFound" => { return RusotoError::Service( CreateClusterError::ClusterParameterGroupNotFoundFault( parsed_error.message, ), ) } "ClusterQuotaExceeded" => { return RusotoError::Service(CreateClusterError::ClusterQuotaExceededFault( parsed_error.message, )) } "ClusterSecurityGroupNotFound" => { return RusotoError::Service( CreateClusterError::ClusterSecurityGroupNotFoundFault( parsed_error.message, ), ) } "ClusterSubnetGroupNotFoundFault" => { return RusotoError::Service( CreateClusterError::ClusterSubnetGroupNotFoundFault( parsed_error.message, ), ) } "DependentServiceRequestThrottlingFault" => { return RusotoError::Service( CreateClusterError::DependentServiceRequestThrottlingFault( parsed_error.message, ), ) } "HsmClientCertificateNotFoundFault" => { return RusotoError::Service( CreateClusterError::HsmClientCertificateNotFoundFault( parsed_error.message, ), ) } "HsmConfigurationNotFoundFault" => { return RusotoError::Service( CreateClusterError::HsmConfigurationNotFoundFault(parsed_error.message), ) } "InsufficientClusterCapacity" => { return RusotoError::Service( CreateClusterError::InsufficientClusterCapacityFault( parsed_error.message, ), ) } "InvalidClusterSubnetGroupStateFault" => { return RusotoError::Service( CreateClusterError::InvalidClusterSubnetGroupStateFault( parsed_error.message, ), ) } "InvalidClusterTrack" => { return RusotoError::Service(CreateClusterError::InvalidClusterTrackFault( parsed_error.message, )) } "InvalidElasticIpFault" => { 
return RusotoError::Service(CreateClusterError::InvalidElasticIpFault( parsed_error.message, )) } "InvalidRetentionPeriodFault" => { return RusotoError::Service( CreateClusterError::InvalidRetentionPeriodFault(parsed_error.message), ) } "InvalidSubnet" => { return RusotoError::Service(CreateClusterError::InvalidSubnet( parsed_error.message, )) } "InvalidTagFault" => { return RusotoError::Service(CreateClusterError::InvalidTagFault( parsed_error.message, )) } "InvalidVPCNetworkStateFault" => { return RusotoError::Service( CreateClusterError::InvalidVPCNetworkStateFault(parsed_error.message), ) } "LimitExceededFault" => { return RusotoError::Service(CreateClusterError::LimitExceededFault( parsed_error.message, )) } "NumberOfNodesPerClusterLimitExceeded" => { return RusotoError::Service( CreateClusterError::NumberOfNodesPerClusterLimitExceededFault( parsed_error.message, ), ) } "NumberOfNodesQuotaExceeded" => { return RusotoError::Service( CreateClusterError::NumberOfNodesQuotaExceededFault( parsed_error.message, ), ) } "SnapshotScheduleNotFound" => { return RusotoError::Service( CreateClusterError::SnapshotScheduleNotFoundFault(parsed_error.message), ) } "TagLimitExceededFault" => { return RusotoError::Service(CreateClusterError::TagLimitExceededFault( parsed_error.message, )) } "UnauthorizedOperation" => { return RusotoError::Service(CreateClusterError::UnauthorizedOperation( parsed_error.message, )) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for CreateClusterError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { CreateClusterError::ClusterAlreadyExistsFault(ref cause) => write!(f, "{}", cause), CreateClusterError::ClusterParameterGroupNotFoundFault(ref cause) => { write!(f, "{}", cause) } 
CreateClusterError::ClusterQuotaExceededFault(ref cause) => write!(f, "{}", cause), CreateClusterError::ClusterSecurityGroupNotFoundFault(ref cause) => { write!(f, "{}", cause) } CreateClusterError::ClusterSubnetGroupNotFoundFault(ref cause) => { write!(f, "{}", cause) } CreateClusterError::DependentServiceRequestThrottlingFault(ref cause) => { write!(f, "{}", cause) } CreateClusterError::HsmClientCertificateNotFoundFault(ref cause) => { write!(f, "{}", cause) } CreateClusterError::HsmConfigurationNotFoundFault(ref cause) => write!(f, "{}", cause), CreateClusterError::InsufficientClusterCapacityFault(ref cause) => { write!(f, "{}", cause) } CreateClusterError::InvalidClusterSubnetGroupStateFault(ref cause) => { write!(f, "{}", cause) } CreateClusterError::InvalidClusterTrackFault(ref cause) => write!(f, "{}", cause), CreateClusterError::InvalidElasticIpFault(ref cause) => write!(f, "{}", cause), CreateClusterError::InvalidRetentionPeriodFault(ref cause) => write!(f, "{}", cause), CreateClusterError::InvalidSubnet(ref cause) => write!(f, "{}", cause), CreateClusterError::InvalidTagFault(ref cause) => write!(f, "{}", cause), CreateClusterError::InvalidVPCNetworkStateFault(ref cause) => write!(f, "{}", cause), CreateClusterError::LimitExceededFault(ref cause) => write!(f, "{}", cause), CreateClusterError::NumberOfNodesPerClusterLimitExceededFault(ref cause) => { write!(f, "{}", cause) } CreateClusterError::NumberOfNodesQuotaExceededFault(ref cause) => { write!(f, "{}", cause) } CreateClusterError::SnapshotScheduleNotFoundFault(ref cause) => write!(f, "{}", cause), CreateClusterError::TagLimitExceededFault(ref cause) => write!(f, "{}", cause), CreateClusterError::UnauthorizedOperation(ref cause) => write!(f, "{}", cause), } } } impl Error for CreateClusterError {} /// Errors returned by CreateClusterParameterGroup #[derive(Debug, PartialEq)] pub enum CreateClusterParameterGroupError { ///

A cluster parameter group with the same name already exists.

ClusterParameterGroupAlreadyExistsFault(String), ///

The request would result in the user exceeding the allowed number of cluster parameter groups. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

ClusterParameterGroupQuotaExceededFault(String), ///

The tag is invalid.

InvalidTagFault(String), ///

You have exceeded the number of tags allowed.

TagLimitExceededFault(String), } impl CreateClusterParameterGroupError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterParameterGroupAlreadyExists" => return RusotoError::Service( CreateClusterParameterGroupError::ClusterParameterGroupAlreadyExistsFault( parsed_error.message, ), ), "ClusterParameterGroupQuotaExceeded" => return RusotoError::Service( CreateClusterParameterGroupError::ClusterParameterGroupQuotaExceededFault( parsed_error.message, ), ), "InvalidTagFault" => { return RusotoError::Service( CreateClusterParameterGroupError::InvalidTagFault(parsed_error.message), ) } "TagLimitExceededFault" => { return RusotoError::Service( CreateClusterParameterGroupError::TagLimitExceededFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for CreateClusterParameterGroupError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { CreateClusterParameterGroupError::ClusterParameterGroupAlreadyExistsFault( ref cause, ) => write!(f, "{}", cause), CreateClusterParameterGroupError::ClusterParameterGroupQuotaExceededFault( ref cause, ) => write!(f, "{}", cause), CreateClusterParameterGroupError::InvalidTagFault(ref cause) => write!(f, "{}", cause), CreateClusterParameterGroupError::TagLimitExceededFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for CreateClusterParameterGroupError {} /// Errors returned by CreateClusterSecurityGroup #[derive(Debug, PartialEq)] pub enum CreateClusterSecurityGroupError { ///

A cluster security group with the same name already exists.

ClusterSecurityGroupAlreadyExistsFault(String), ///

The request would result in the user exceeding the allowed number of cluster security groups. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

ClusterSecurityGroupQuotaExceededFault(String), ///

The tag is invalid.

InvalidTagFault(String), ///

You have exceeded the number of tags allowed.

TagLimitExceededFault(String), } impl CreateClusterSecurityGroupError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterSecurityGroupAlreadyExists" => { return RusotoError::Service( CreateClusterSecurityGroupError::ClusterSecurityGroupAlreadyExistsFault( parsed_error.message, ), ) } "QuotaExceeded.ClusterSecurityGroup" => { return RusotoError::Service( CreateClusterSecurityGroupError::ClusterSecurityGroupQuotaExceededFault( parsed_error.message, ), ) } "InvalidTagFault" => { return RusotoError::Service( CreateClusterSecurityGroupError::InvalidTagFault(parsed_error.message), ) } "TagLimitExceededFault" => { return RusotoError::Service( CreateClusterSecurityGroupError::TagLimitExceededFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for CreateClusterSecurityGroupError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { CreateClusterSecurityGroupError::ClusterSecurityGroupAlreadyExistsFault(ref cause) => { write!(f, "{}", cause) } CreateClusterSecurityGroupError::ClusterSecurityGroupQuotaExceededFault(ref cause) => { write!(f, "{}", cause) } CreateClusterSecurityGroupError::InvalidTagFault(ref cause) => write!(f, "{}", cause), CreateClusterSecurityGroupError::TagLimitExceededFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for CreateClusterSecurityGroupError {} /// Errors returned by CreateClusterSnapshot #[derive(Debug, PartialEq)] pub enum CreateClusterSnapshotError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The value specified as a snapshot identifier is already used by an existing snapshot.

ClusterSnapshotAlreadyExistsFault(String), ///

The request would result in the user exceeding the allowed number of cluster snapshots.

ClusterSnapshotQuotaExceededFault(String), ///

The specified cluster is not in the available state.

InvalidClusterStateFault(String), ///

The retention period specified is either in the past or is not a valid value.

The value must be either -1 or an integer between 1 and 3,653.

InvalidRetentionPeriodFault(String), ///

The tag is invalid.

InvalidTagFault(String), ///

You have exceeded the number of tags allowed.

TagLimitExceededFault(String), } impl CreateClusterSnapshotError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service( CreateClusterSnapshotError::ClusterNotFoundFault(parsed_error.message), ) } "ClusterSnapshotAlreadyExists" => { return RusotoError::Service( CreateClusterSnapshotError::ClusterSnapshotAlreadyExistsFault( parsed_error.message, ), ) } "ClusterSnapshotQuotaExceeded" => { return RusotoError::Service( CreateClusterSnapshotError::ClusterSnapshotQuotaExceededFault( parsed_error.message, ), ) } "InvalidClusterState" => { return RusotoError::Service( CreateClusterSnapshotError::InvalidClusterStateFault( parsed_error.message, ), ) } "InvalidRetentionPeriodFault" => { return RusotoError::Service( CreateClusterSnapshotError::InvalidRetentionPeriodFault( parsed_error.message, ), ) } "InvalidTagFault" => { return RusotoError::Service(CreateClusterSnapshotError::InvalidTagFault( parsed_error.message, )) } "TagLimitExceededFault" => { return RusotoError::Service( CreateClusterSnapshotError::TagLimitExceededFault(parsed_error.message), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for CreateClusterSnapshotError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { CreateClusterSnapshotError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), CreateClusterSnapshotError::ClusterSnapshotAlreadyExistsFault(ref cause) => { write!(f, "{}", cause) } CreateClusterSnapshotError::ClusterSnapshotQuotaExceededFault(ref cause) => { write!(f, "{}", 
cause) } CreateClusterSnapshotError::InvalidClusterStateFault(ref cause) => { write!(f, "{}", cause) } CreateClusterSnapshotError::InvalidRetentionPeriodFault(ref cause) => { write!(f, "{}", cause) } CreateClusterSnapshotError::InvalidTagFault(ref cause) => write!(f, "{}", cause), CreateClusterSnapshotError::TagLimitExceededFault(ref cause) => write!(f, "{}", cause), } } } impl Error for CreateClusterSnapshotError {} /// Errors returned by CreateClusterSubnetGroup #[derive(Debug, PartialEq)] pub enum CreateClusterSubnetGroupError { ///

A ClusterSubnetGroupName is already used by an existing cluster subnet group.

ClusterSubnetGroupAlreadyExistsFault(String), ///

The request would result in user exceeding the allowed number of cluster subnet groups. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

ClusterSubnetGroupQuotaExceededFault(String), ///

The request would result in user exceeding the allowed number of subnets in a cluster subnet groups. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

ClusterSubnetQuotaExceededFault(String), ///

The request cannot be completed because a dependent service is throttling requests made by Amazon Redshift on your behalf. Wait and retry the request.

DependentServiceRequestThrottlingFault(String), ///

The requested subnet is not valid, or not all of the subnets are in the same VPC.

InvalidSubnet(String), ///

The tag is invalid.

InvalidTagFault(String), ///

You have exceeded the number of tags allowed.

TagLimitExceededFault(String), ///

Your account is not authorized to perform the requested operation.

UnauthorizedOperation(String),
}

impl CreateClusterSubnetGroupError {
    // NOTE(review): every `<...>` generic parameter list in this region was lost
    // during extraction; they are reconstructed here in the standard rusoto
    // generated form (`RusotoError<Self>`, `Result<XmlError, XmlParseError>`,
    // `fn deserialize<T>`) — confirm against the service_crategen output.
    /// Maps the AWS error code found in an unsuccessful response body to the
    /// matching enum variant; unrecognized codes become `RusotoError::Unknown`.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateClusterSubnetGroupError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "ClusterSubnetGroupAlreadyExists" => return RusotoError::Service(CreateClusterSubnetGroupError::ClusterSubnetGroupAlreadyExistsFault(parsed_error.message)),
                    "ClusterSubnetGroupQuotaExceeded" => return RusotoError::Service(CreateClusterSubnetGroupError::ClusterSubnetGroupQuotaExceededFault(parsed_error.message)),
                    "ClusterSubnetQuotaExceededFault" => return RusotoError::Service(CreateClusterSubnetGroupError::ClusterSubnetQuotaExceededFault(parsed_error.message)),
                    "DependentServiceRequestThrottlingFault" => return RusotoError::Service(CreateClusterSubnetGroupError::DependentServiceRequestThrottlingFault(parsed_error.message)),
                    "InvalidSubnet" => return RusotoError::Service(CreateClusterSubnetGroupError::InvalidSubnet(parsed_error.message)),
                    "InvalidTagFault" => return RusotoError::Service(CreateClusterSubnetGroupError::InvalidTagFault(parsed_error.message)),
                    "TagLimitExceededFault" => return RusotoError::Service(CreateClusterSubnetGroupError::TagLimitExceededFault(parsed_error.message)),
                    "UnauthorizedOperation" => return RusotoError::Service(CreateClusterSubnetGroupError::UnauthorizedOperation(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    /// Parses the standard `ErrorResponse`/`Error` XML envelope.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for CreateClusterSubnetGroupError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CreateClusterSubnetGroupError::ClusterSubnetGroupAlreadyExistsFault(ref cause) => write!(f, "{}", cause),
            CreateClusterSubnetGroupError::ClusterSubnetGroupQuotaExceededFault(ref cause) => write!(f, "{}", cause),
            CreateClusterSubnetGroupError::ClusterSubnetQuotaExceededFault(ref cause) => write!(f, "{}", cause),
            CreateClusterSubnetGroupError::DependentServiceRequestThrottlingFault(ref cause) => write!(f, "{}", cause),
            CreateClusterSubnetGroupError::InvalidSubnet(ref cause) => write!(f, "{}", cause),
            CreateClusterSubnetGroupError::InvalidTagFault(ref cause) => write!(f, "{}", cause),
            CreateClusterSubnetGroupError::TagLimitExceededFault(ref cause) => write!(f, "{}", cause),
            CreateClusterSubnetGroupError::UnauthorizedOperation(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for CreateClusterSubnetGroupError {}
/// Errors returned by CreateEventSubscription
#[derive(Debug, PartialEq)]
pub enum CreateEventSubscriptionError {
    /// <p>The request would exceed the allowed number of event subscriptions for this account. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.</p>
    EventSubscriptionQuotaExceededFault(String),
    /// <p>The tag is invalid.</p>
    InvalidTagFault(String),
    /// <p>Amazon SNS has responded that there is a problem with the specified Amazon SNS topic.</p>
    SNSInvalidTopicFault(String),
    /// <p>You do not have permission to publish to the specified Amazon SNS topic.</p>
    SNSNoAuthorizationFault(String),
    /// <p>An Amazon SNS topic with the specified Amazon Resource Name (ARN) does not exist.</p>
    SNSTopicArnNotFoundFault(String),
    /// <p>The specified Amazon Redshift event source could not be found.</p>
    SourceNotFoundFault(String),
    /// <p>There is already an existing event notification subscription with the specified name.</p>
    SubscriptionAlreadyExistFault(String),
    /// <p>The value specified for the event category was not one of the allowed values, or it specified a category that does not apply to the specified source type. The allowed values are Configuration, Management, Monitoring, and Security.</p>
    SubscriptionCategoryNotFoundFault(String),
    /// <p>An Amazon Redshift event with the specified event ID does not exist.</p>
    SubscriptionEventIdNotFoundFault(String),
    /// <p>The value specified for the event severity was not one of the allowed values, or it specified a severity that does not apply to the specified source type. The allowed values are ERROR and INFO.</p>
    SubscriptionSeverityNotFoundFault(String),
    /// <p>You have exceeded the number of tags allowed.</p>
    TagLimitExceededFault(String),
}

impl CreateEventSubscriptionError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateEventSubscriptionError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "EventSubscriptionQuotaExceeded" => return RusotoError::Service(CreateEventSubscriptionError::EventSubscriptionQuotaExceededFault(parsed_error.message)),
                    "InvalidTagFault" => return RusotoError::Service(CreateEventSubscriptionError::InvalidTagFault(parsed_error.message)),
                    "SNSInvalidTopic" => return RusotoError::Service(CreateEventSubscriptionError::SNSInvalidTopicFault(parsed_error.message)),
                    "SNSNoAuthorization" => return RusotoError::Service(CreateEventSubscriptionError::SNSNoAuthorizationFault(parsed_error.message)),
                    "SNSTopicArnNotFound" => return RusotoError::Service(CreateEventSubscriptionError::SNSTopicArnNotFoundFault(parsed_error.message)),
                    "SourceNotFound" => return RusotoError::Service(CreateEventSubscriptionError::SourceNotFoundFault(parsed_error.message)),
                    "SubscriptionAlreadyExist" => return RusotoError::Service(CreateEventSubscriptionError::SubscriptionAlreadyExistFault(parsed_error.message)),
                    "SubscriptionCategoryNotFound" => return RusotoError::Service(CreateEventSubscriptionError::SubscriptionCategoryNotFoundFault(parsed_error.message)),
                    "SubscriptionEventIdNotFound" => return RusotoError::Service(CreateEventSubscriptionError::SubscriptionEventIdNotFoundFault(parsed_error.message)),
                    "SubscriptionSeverityNotFound" => return RusotoError::Service(CreateEventSubscriptionError::SubscriptionSeverityNotFoundFault(parsed_error.message)),
                    "TagLimitExceededFault" => return RusotoError::Service(CreateEventSubscriptionError::TagLimitExceededFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for CreateEventSubscriptionError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CreateEventSubscriptionError::EventSubscriptionQuotaExceededFault(ref cause) => write!(f, "{}", cause),
            CreateEventSubscriptionError::InvalidTagFault(ref cause) => write!(f, "{}", cause),
            CreateEventSubscriptionError::SNSInvalidTopicFault(ref cause) => write!(f, "{}", cause),
            CreateEventSubscriptionError::SNSNoAuthorizationFault(ref cause) => write!(f, "{}", cause),
            CreateEventSubscriptionError::SNSTopicArnNotFoundFault(ref cause) => write!(f, "{}", cause),
            CreateEventSubscriptionError::SourceNotFoundFault(ref cause) => write!(f, "{}", cause),
            CreateEventSubscriptionError::SubscriptionAlreadyExistFault(ref cause) => write!(f, "{}", cause),
            CreateEventSubscriptionError::SubscriptionCategoryNotFoundFault(ref cause) => write!(f, "{}", cause),
            CreateEventSubscriptionError::SubscriptionEventIdNotFoundFault(ref cause) => write!(f, "{}", cause),
            CreateEventSubscriptionError::SubscriptionSeverityNotFoundFault(ref cause) => write!(f, "{}", cause),
            CreateEventSubscriptionError::TagLimitExceededFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for CreateEventSubscriptionError {}
/// Errors returned by CreateHsmClientCertificate
#[derive(Debug, PartialEq)]
pub enum CreateHsmClientCertificateError {
    /// <p>There is already an existing Amazon Redshift HSM client certificate with the specified identifier.</p>
    HsmClientCertificateAlreadyExistsFault(String),
    /// <p>The quota for HSM client certificates has been reached. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.</p>
    HsmClientCertificateQuotaExceededFault(String),
    /// <p>The tag is invalid.</p>
    InvalidTagFault(String),
    /// <p>You have exceeded the number of tags allowed.</p>
    TagLimitExceededFault(String),
}

impl CreateHsmClientCertificateError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateHsmClientCertificateError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "HsmClientCertificateAlreadyExistsFault" => return RusotoError::Service(CreateHsmClientCertificateError::HsmClientCertificateAlreadyExistsFault(parsed_error.message)),
                    "HsmClientCertificateQuotaExceededFault" => return RusotoError::Service(CreateHsmClientCertificateError::HsmClientCertificateQuotaExceededFault(parsed_error.message)),
                    "InvalidTagFault" => return RusotoError::Service(CreateHsmClientCertificateError::InvalidTagFault(parsed_error.message)),
                    "TagLimitExceededFault" => return RusotoError::Service(CreateHsmClientCertificateError::TagLimitExceededFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for CreateHsmClientCertificateError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CreateHsmClientCertificateError::HsmClientCertificateAlreadyExistsFault(ref cause) => write!(f, "{}", cause),
            CreateHsmClientCertificateError::HsmClientCertificateQuotaExceededFault(ref cause) => write!(f, "{}", cause),
            CreateHsmClientCertificateError::InvalidTagFault(ref cause) => write!(f, "{}", cause),
            CreateHsmClientCertificateError::TagLimitExceededFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for CreateHsmClientCertificateError {}
/// Errors returned by CreateHsmConfiguration
#[derive(Debug, PartialEq)]
pub enum CreateHsmConfigurationError {
    /// <p>There is already an existing Amazon Redshift HSM configuration with the specified identifier.</p>
    HsmConfigurationAlreadyExistsFault(String),
    /// <p>The quota for HSM configurations has been reached. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.</p>
    HsmConfigurationQuotaExceededFault(String),
    /// <p>The tag is invalid.</p>
    InvalidTagFault(String),
    /// <p>You have exceeded the number of tags allowed.</p>
    TagLimitExceededFault(String),
}

impl CreateHsmConfigurationError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateHsmConfigurationError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "HsmConfigurationAlreadyExistsFault" => return RusotoError::Service(CreateHsmConfigurationError::HsmConfigurationAlreadyExistsFault(parsed_error.message)),
                    "HsmConfigurationQuotaExceededFault" => return RusotoError::Service(CreateHsmConfigurationError::HsmConfigurationQuotaExceededFault(parsed_error.message)),
                    "InvalidTagFault" => return RusotoError::Service(CreateHsmConfigurationError::InvalidTagFault(parsed_error.message)),
                    "TagLimitExceededFault" => return RusotoError::Service(CreateHsmConfigurationError::TagLimitExceededFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for CreateHsmConfigurationError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CreateHsmConfigurationError::HsmConfigurationAlreadyExistsFault(ref cause) => write!(f, "{}", cause),
            CreateHsmConfigurationError::HsmConfigurationQuotaExceededFault(ref cause) => write!(f, "{}", cause),
            CreateHsmConfigurationError::InvalidTagFault(ref cause) => write!(f, "{}", cause),
            CreateHsmConfigurationError::TagLimitExceededFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for CreateHsmConfigurationError {}
/// Errors returned by CreateScheduledAction
#[derive(Debug, PartialEq)]
pub enum CreateScheduledActionError {
    /// <p>The schedule you submitted isn't valid.</p>
    InvalidScheduleFault(String),
    /// <p>The scheduled action is not valid.</p>
    InvalidScheduledActionFault(String),
    /// <p>The scheduled action already exists.</p>
    ScheduledActionAlreadyExistsFault(String),
    /// <p>The quota for scheduled actions exceeded.</p>
    ScheduledActionQuotaExceededFault(String),
    /// <p>The action type specified for a scheduled action is not supported.</p>
    ScheduledActionTypeUnsupportedFault(String),
    /// <p>Your account is not authorized to perform the requested operation.</p>
    UnauthorizedOperation(String),
}

impl CreateScheduledActionError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateScheduledActionError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "InvalidSchedule" => return RusotoError::Service(CreateScheduledActionError::InvalidScheduleFault(parsed_error.message)),
                    "InvalidScheduledAction" => return RusotoError::Service(CreateScheduledActionError::InvalidScheduledActionFault(parsed_error.message)),
                    "ScheduledActionAlreadyExists" => return RusotoError::Service(CreateScheduledActionError::ScheduledActionAlreadyExistsFault(parsed_error.message)),
                    "ScheduledActionQuotaExceeded" => return RusotoError::Service(CreateScheduledActionError::ScheduledActionQuotaExceededFault(parsed_error.message)),
                    "ScheduledActionTypeUnsupported" => return RusotoError::Service(CreateScheduledActionError::ScheduledActionTypeUnsupportedFault(parsed_error.message)),
                    "UnauthorizedOperation" => return RusotoError::Service(CreateScheduledActionError::UnauthorizedOperation(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for CreateScheduledActionError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CreateScheduledActionError::InvalidScheduleFault(ref cause) => write!(f, "{}", cause),
            CreateScheduledActionError::InvalidScheduledActionFault(ref cause) => write!(f, "{}", cause),
            CreateScheduledActionError::ScheduledActionAlreadyExistsFault(ref cause) => write!(f, "{}", cause),
            CreateScheduledActionError::ScheduledActionQuotaExceededFault(ref cause) => write!(f, "{}", cause),
            CreateScheduledActionError::ScheduledActionTypeUnsupportedFault(ref cause) => write!(f, "{}", cause),
            CreateScheduledActionError::UnauthorizedOperation(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for CreateScheduledActionError {}
/// Errors returned by CreateSnapshotCopyGrant
#[derive(Debug, PartialEq)]
pub enum CreateSnapshotCopyGrantError {
    /// <p>The request cannot be completed because a dependent service is throttling requests made by Amazon Redshift on your behalf. Wait and retry the request.</p>
    DependentServiceRequestThrottlingFault(String),
    /// <p>The tag is invalid.</p>
    InvalidTagFault(String),
    /// <p>The encryption key has exceeded its grant limit in AWS KMS.</p>
    LimitExceededFault(String),
    /// <p>The snapshot copy grant can't be created because a grant with the same name already exists.</p>
    SnapshotCopyGrantAlreadyExistsFault(String),
    /// <p>The AWS account has exceeded the maximum number of snapshot copy grants in this region.</p>
    SnapshotCopyGrantQuotaExceededFault(String),
    /// <p>You have exceeded the number of tags allowed.</p>
    TagLimitExceededFault(String),
}

impl CreateSnapshotCopyGrantError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateSnapshotCopyGrantError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "DependentServiceRequestThrottlingFault" => return RusotoError::Service(CreateSnapshotCopyGrantError::DependentServiceRequestThrottlingFault(parsed_error.message)),
                    "InvalidTagFault" => return RusotoError::Service(CreateSnapshotCopyGrantError::InvalidTagFault(parsed_error.message)),
                    "LimitExceededFault" => return RusotoError::Service(CreateSnapshotCopyGrantError::LimitExceededFault(parsed_error.message)),
                    "SnapshotCopyGrantAlreadyExistsFault" => return RusotoError::Service(CreateSnapshotCopyGrantError::SnapshotCopyGrantAlreadyExistsFault(parsed_error.message)),
                    "SnapshotCopyGrantQuotaExceededFault" => return RusotoError::Service(CreateSnapshotCopyGrantError::SnapshotCopyGrantQuotaExceededFault(parsed_error.message)),
                    "TagLimitExceededFault" => return RusotoError::Service(CreateSnapshotCopyGrantError::TagLimitExceededFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for CreateSnapshotCopyGrantError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CreateSnapshotCopyGrantError::DependentServiceRequestThrottlingFault(ref cause) => write!(f, "{}", cause),
            CreateSnapshotCopyGrantError::InvalidTagFault(ref cause) => write!(f, "{}", cause),
            CreateSnapshotCopyGrantError::LimitExceededFault(ref cause) => write!(f, "{}", cause),
            CreateSnapshotCopyGrantError::SnapshotCopyGrantAlreadyExistsFault(ref cause) => write!(f, "{}", cause),
            CreateSnapshotCopyGrantError::SnapshotCopyGrantQuotaExceededFault(ref cause) => write!(f, "{}", cause),
            CreateSnapshotCopyGrantError::TagLimitExceededFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for CreateSnapshotCopyGrantError {}
/// Errors returned by CreateSnapshotSchedule
#[derive(Debug, PartialEq)]
pub enum CreateSnapshotScheduleError {
    /// <p>The schedule you submitted isn't valid.</p>
    InvalidScheduleFault(String),
    /// <p>The definition you submitted is not supported.</p>
    ScheduleDefinitionTypeUnsupportedFault(String),
    /// <p>The specified snapshot schedule already exists.</p>
    SnapshotScheduleAlreadyExistsFault(String),
    /// <p>You have exceeded the quota of snapshot schedules.</p>
    SnapshotScheduleQuotaExceededFault(String),
    /// <p>You have exceeded the number of tags allowed.</p>
    TagLimitExceededFault(String),
}

impl CreateSnapshotScheduleError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateSnapshotScheduleError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "InvalidSchedule" => return RusotoError::Service(CreateSnapshotScheduleError::InvalidScheduleFault(parsed_error.message)),
                    "ScheduleDefinitionTypeUnsupported" => return RusotoError::Service(CreateSnapshotScheduleError::ScheduleDefinitionTypeUnsupportedFault(parsed_error.message)),
                    "SnapshotScheduleAlreadyExists" => return RusotoError::Service(CreateSnapshotScheduleError::SnapshotScheduleAlreadyExistsFault(parsed_error.message)),
                    "SnapshotScheduleQuotaExceeded" => return RusotoError::Service(CreateSnapshotScheduleError::SnapshotScheduleQuotaExceededFault(parsed_error.message)),
                    "TagLimitExceededFault" => return RusotoError::Service(CreateSnapshotScheduleError::TagLimitExceededFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for CreateSnapshotScheduleError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CreateSnapshotScheduleError::InvalidScheduleFault(ref cause) => write!(f, "{}", cause),
            CreateSnapshotScheduleError::ScheduleDefinitionTypeUnsupportedFault(ref cause) => write!(f, "{}", cause),
            CreateSnapshotScheduleError::SnapshotScheduleAlreadyExistsFault(ref cause) => write!(f, "{}", cause),
            CreateSnapshotScheduleError::SnapshotScheduleQuotaExceededFault(ref cause) => write!(f, "{}", cause),
            CreateSnapshotScheduleError::TagLimitExceededFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for CreateSnapshotScheduleError {}
/// Errors returned by CreateTags
#[derive(Debug, PartialEq)]
pub enum CreateTagsError {
    /// <p>The tag is invalid.</p>
    InvalidTagFault(String),
    /// <p>The resource could not be found.</p>
    ResourceNotFoundFault(String),
    /// <p>You have exceeded the number of tags allowed.</p>
    TagLimitExceededFault(String),
}

impl CreateTagsError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateTagsError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "InvalidTagFault" => return RusotoError::Service(CreateTagsError::InvalidTagFault(parsed_error.message)),
                    "ResourceNotFoundFault" => return RusotoError::Service(CreateTagsError::ResourceNotFoundFault(parsed_error.message)),
                    "TagLimitExceededFault" => return RusotoError::Service(CreateTagsError::TagLimitExceededFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for CreateTagsError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CreateTagsError::InvalidTagFault(ref cause) => write!(f, "{}", cause),
            CreateTagsError::ResourceNotFoundFault(ref cause) => write!(f, "{}", cause),
            CreateTagsError::TagLimitExceededFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for CreateTagsError {}
/// Errors returned by CreateUsageLimit
#[derive(Debug, PartialEq)]
pub enum CreateUsageLimitError {
    /// <p>The ClusterIdentifier parameter does not refer to an existing cluster.</p>
    ClusterNotFoundFault(String),
    /// <p>The specified cluster is not in the available state.</p>
    InvalidClusterStateFault(String),
    /// <p>The usage limit is not valid.</p>
    InvalidUsageLimitFault(String),
    /// <p>The encryption key has exceeded its grant limit in AWS KMS.</p>
    LimitExceededFault(String),
    /// <p>You have exceeded the number of tags allowed.</p>
    TagLimitExceededFault(String),
    /// <p>The requested operation isn't supported.</p>
    UnsupportedOperationFault(String),
    /// <p>The usage limit already exists.</p>
    UsageLimitAlreadyExistsFault(String),
}

impl CreateUsageLimitError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateUsageLimitError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "ClusterNotFound" => return RusotoError::Service(CreateUsageLimitError::ClusterNotFoundFault(parsed_error.message)),
                    "InvalidClusterState" => return RusotoError::Service(CreateUsageLimitError::InvalidClusterStateFault(parsed_error.message)),
                    "InvalidUsageLimit" => return RusotoError::Service(CreateUsageLimitError::InvalidUsageLimitFault(parsed_error.message)),
                    "LimitExceededFault" => return RusotoError::Service(CreateUsageLimitError::LimitExceededFault(parsed_error.message)),
                    "TagLimitExceededFault" => return RusotoError::Service(CreateUsageLimitError::TagLimitExceededFault(parsed_error.message)),
                    "UnsupportedOperation" => return RusotoError::Service(CreateUsageLimitError::UnsupportedOperationFault(parsed_error.message)),
                    "UsageLimitAlreadyExists" => return RusotoError::Service(CreateUsageLimitError::UsageLimitAlreadyExistsFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for CreateUsageLimitError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CreateUsageLimitError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause),
            CreateUsageLimitError::InvalidClusterStateFault(ref cause) => write!(f, "{}", cause),
            CreateUsageLimitError::InvalidUsageLimitFault(ref cause) => write!(f, "{}", cause),
            CreateUsageLimitError::LimitExceededFault(ref cause) => write!(f, "{}", cause),
            CreateUsageLimitError::TagLimitExceededFault(ref cause) => write!(f, "{}", cause),
            CreateUsageLimitError::UnsupportedOperationFault(ref cause) => write!(f, "{}", cause),
            CreateUsageLimitError::UsageLimitAlreadyExistsFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for CreateUsageLimitError {}
/// Errors returned by DeleteCluster
#[derive(Debug, PartialEq)]
pub enum DeleteClusterError {
    /// <p>The ClusterIdentifier parameter does not refer to an existing cluster.</p>
    ClusterNotFoundFault(String),
    /// <p>The value specified as a snapshot identifier is already used by an existing snapshot.</p>
    ClusterSnapshotAlreadyExistsFault(String),
    /// <p>The request would result in the user exceeding the allowed number of cluster snapshots.</p>
    ClusterSnapshotQuotaExceededFault(String),
    /// <p>The specified cluster is not in the available state.</p>
    InvalidClusterStateFault(String),
    /// <p>The retention period specified is either in the past or is not a valid value.</p> <p>The value must be either -1 or an integer between 1 and 3,653.</p>
    InvalidRetentionPeriodFault(String),
}

impl DeleteClusterError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteClusterError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "ClusterNotFound" => return RusotoError::Service(DeleteClusterError::ClusterNotFoundFault(parsed_error.message)),
                    "ClusterSnapshotAlreadyExists" => return RusotoError::Service(DeleteClusterError::ClusterSnapshotAlreadyExistsFault(parsed_error.message)),
                    "ClusterSnapshotQuotaExceeded" => return RusotoError::Service(DeleteClusterError::ClusterSnapshotQuotaExceededFault(parsed_error.message)),
                    "InvalidClusterState" => return RusotoError::Service(DeleteClusterError::InvalidClusterStateFault(parsed_error.message)),
                    "InvalidRetentionPeriodFault" => return RusotoError::Service(DeleteClusterError::InvalidRetentionPeriodFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for DeleteClusterError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            DeleteClusterError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause),
            DeleteClusterError::ClusterSnapshotAlreadyExistsFault(ref cause) => write!(f, "{}", cause),
            DeleteClusterError::ClusterSnapshotQuotaExceededFault(ref cause) => write!(f, "{}", cause),
            DeleteClusterError::InvalidClusterStateFault(ref cause) => write!(f, "{}", cause),
            DeleteClusterError::InvalidRetentionPeriodFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for DeleteClusterError {}
/// Errors returned by DeleteClusterParameterGroup
#[derive(Debug, PartialEq)]
pub enum DeleteClusterParameterGroupError {
    /// <p>The parameter group name does not refer to an existing parameter group.</p>
    ClusterParameterGroupNotFoundFault(String),
    /// <p>The cluster parameter group action can not be completed because another task is in progress that involves the parameter group. Wait a few moments and try the operation again.</p>
    InvalidClusterParameterGroupStateFault(String),
}

impl DeleteClusterParameterGroupError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteClusterParameterGroupError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "ClusterParameterGroupNotFound" => return RusotoError::Service(DeleteClusterParameterGroupError::ClusterParameterGroupNotFoundFault(parsed_error.message)),
                    "InvalidClusterParameterGroupState" => return RusotoError::Service(DeleteClusterParameterGroupError::InvalidClusterParameterGroupStateFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for DeleteClusterParameterGroupError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            DeleteClusterParameterGroupError::ClusterParameterGroupNotFoundFault(ref cause) => write!(f, "{}", cause),
            DeleteClusterParameterGroupError::InvalidClusterParameterGroupStateFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for DeleteClusterParameterGroupError {}
/// Errors returned by DeleteClusterSecurityGroup
#[derive(Debug, PartialEq)]
pub enum DeleteClusterSecurityGroupError {
    /// <p>The cluster security group name does not refer to an existing cluster security group.</p>
    ClusterSecurityGroupNotFoundFault(String),
    /// <p>The state of the cluster security group is not available.</p>
    InvalidClusterSecurityGroupStateFault(String),
}

impl DeleteClusterSecurityGroupError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteClusterSecurityGroupError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "ClusterSecurityGroupNotFound" => return RusotoError::Service(DeleteClusterSecurityGroupError::ClusterSecurityGroupNotFoundFault(parsed_error.message)),
                    "InvalidClusterSecurityGroupState" => return RusotoError::Service(DeleteClusterSecurityGroupError::InvalidClusterSecurityGroupStateFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for DeleteClusterSecurityGroupError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            DeleteClusterSecurityGroupError::ClusterSecurityGroupNotFoundFault(ref cause) => write!(f, "{}", cause),
            DeleteClusterSecurityGroupError::InvalidClusterSecurityGroupStateFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for DeleteClusterSecurityGroupError {}
/// Errors returned by DeleteClusterSnapshot
#[derive(Debug, PartialEq)]
pub enum DeleteClusterSnapshotError {
    /// <p>The snapshot identifier does not refer to an existing cluster snapshot.</p>
    ClusterSnapshotNotFoundFault(String),
    /// <p>The specified cluster snapshot is not in the available state, or other accounts are authorized to access the snapshot.</p>
    InvalidClusterSnapshotStateFault(String),
}

impl DeleteClusterSnapshotError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteClusterSnapshotError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "ClusterSnapshotNotFound" => return RusotoError::Service(DeleteClusterSnapshotError::ClusterSnapshotNotFoundFault(parsed_error.message)),
                    "InvalidClusterSnapshotState" => return RusotoError::Service(DeleteClusterSnapshotError::InvalidClusterSnapshotStateFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for DeleteClusterSnapshotError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            DeleteClusterSnapshotError::ClusterSnapshotNotFoundFault(ref cause) => write!(f, "{}", cause),
            DeleteClusterSnapshotError::InvalidClusterSnapshotStateFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for DeleteClusterSnapshotError {}
/// Errors returned by DeleteClusterSubnetGroup
#[derive(Debug, PartialEq)]
pub enum DeleteClusterSubnetGroupError {
    /// <p>The cluster subnet group name does not refer to an existing cluster subnet group.</p>
    ClusterSubnetGroupNotFoundFault(String),
    /// <p>The cluster subnet group cannot be deleted because it is in use.</p>
    InvalidClusterSubnetGroupStateFault(String),
    /// <p>The state of the subnet is invalid.</p>
    InvalidClusterSubnetStateFault(String),
}

impl DeleteClusterSubnetGroupError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteClusterSubnetGroupError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "ClusterSubnetGroupNotFoundFault" => return RusotoError::Service(DeleteClusterSubnetGroupError::ClusterSubnetGroupNotFoundFault(parsed_error.message)),
                    "InvalidClusterSubnetGroupStateFault" => return RusotoError::Service(DeleteClusterSubnetGroupError::InvalidClusterSubnetGroupStateFault(parsed_error.message)),
                    "InvalidClusterSubnetStateFault" => return RusotoError::Service(DeleteClusterSubnetGroupError::InvalidClusterSubnetStateFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for DeleteClusterSubnetGroupError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            DeleteClusterSubnetGroupError::ClusterSubnetGroupNotFoundFault(ref cause) => write!(f, "{}", cause),
            DeleteClusterSubnetGroupError::InvalidClusterSubnetGroupStateFault(ref cause) => write!(f, "{}", cause),
            DeleteClusterSubnetGroupError::InvalidClusterSubnetStateFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for DeleteClusterSubnetGroupError {}
/// Errors returned by DeleteEventSubscription
#[derive(Debug, PartialEq)]
pub enum DeleteEventSubscriptionError {
    /// <p>The subscription request is invalid because it is a duplicate request. This subscription request is already in progress.</p>
    InvalidSubscriptionStateFault(String),
    /// <p>An Amazon Redshift event notification subscription with the specified name does not exist.</p>
    SubscriptionNotFoundFault(String),
}

impl DeleteEventSubscriptionError {
    /// Maps the AWS error code in the response body to the matching variant.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteEventSubscriptionError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "InvalidSubscriptionStateFault" => return RusotoError::Service(DeleteEventSubscriptionError::InvalidSubscriptionStateFault(parsed_error.message)),
                    "SubscriptionNotFound" => return RusotoError::Service(DeleteEventSubscriptionError::SubscriptionNotFoundFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl fmt::Display for DeleteEventSubscriptionError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            DeleteEventSubscriptionError::InvalidSubscriptionStateFault(ref cause) => write!(f, "{}", cause),
            DeleteEventSubscriptionError::SubscriptionNotFoundFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for DeleteEventSubscriptionError {}
/// Errors returned by DeleteHsmClientCertificate
#[derive(Debug, PartialEq)]
pub enum DeleteHsmClientCertificateError {
    /// <p>There is no Amazon Redshift HSM client certificate with the specified identifier.</p>
    HsmClientCertificateNotFoundFault(String),
    /// <p>The specified HSM client certificate is not in the available state, or it is still in use by one or more Amazon Redshift clusters.</p>
InvalidHsmClientCertificateStateFault(String), } impl DeleteHsmClientCertificateError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "HsmClientCertificateNotFoundFault" => { return RusotoError::Service( DeleteHsmClientCertificateError::HsmClientCertificateNotFoundFault( parsed_error.message, ), ) } "InvalidHsmClientCertificateStateFault" => { return RusotoError::Service( DeleteHsmClientCertificateError::InvalidHsmClientCertificateStateFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DeleteHsmClientCertificateError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DeleteHsmClientCertificateError::HsmClientCertificateNotFoundFault(ref cause) => { write!(f, "{}", cause) } DeleteHsmClientCertificateError::InvalidHsmClientCertificateStateFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DeleteHsmClientCertificateError {} /// Errors returned by DeleteHsmConfiguration #[derive(Debug, PartialEq)] pub enum DeleteHsmConfigurationError { ///

There is no Amazon Redshift HSM configuration with the specified identifier.

HsmConfigurationNotFoundFault(String), ///

The specified HSM configuration is not in the available state, or it is still in use by one or more Amazon Redshift clusters.

InvalidHsmConfigurationStateFault(String), } impl DeleteHsmConfigurationError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "HsmConfigurationNotFoundFault" => { return RusotoError::Service( DeleteHsmConfigurationError::HsmConfigurationNotFoundFault( parsed_error.message, ), ) } "InvalidHsmConfigurationStateFault" => { return RusotoError::Service( DeleteHsmConfigurationError::InvalidHsmConfigurationStateFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DeleteHsmConfigurationError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DeleteHsmConfigurationError::HsmConfigurationNotFoundFault(ref cause) => { write!(f, "{}", cause) } DeleteHsmConfigurationError::InvalidHsmConfigurationStateFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DeleteHsmConfigurationError {} /// Errors returned by DeleteScheduledAction #[derive(Debug, PartialEq)] pub enum DeleteScheduledActionError { ///

The scheduled action cannot be found.

ScheduledActionNotFoundFault(String), ///

Your account is not authorized to perform the requested operation.

UnauthorizedOperation(String), } impl DeleteScheduledActionError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ScheduledActionNotFound" => { return RusotoError::Service( DeleteScheduledActionError::ScheduledActionNotFoundFault( parsed_error.message, ), ) } "UnauthorizedOperation" => { return RusotoError::Service( DeleteScheduledActionError::UnauthorizedOperation(parsed_error.message), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DeleteScheduledActionError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DeleteScheduledActionError::ScheduledActionNotFoundFault(ref cause) => { write!(f, "{}", cause) } DeleteScheduledActionError::UnauthorizedOperation(ref cause) => write!(f, "{}", cause), } } } impl Error for DeleteScheduledActionError {} /// Errors returned by DeleteSnapshotCopyGrant #[derive(Debug, PartialEq)] pub enum DeleteSnapshotCopyGrantError { ///

The snapshot copy grant can't be deleted because it is used by one or more clusters.

InvalidSnapshotCopyGrantStateFault(String), ///

The specified snapshot copy grant can't be found. Make sure that the name is typed correctly and that the grant exists in the destination region.

SnapshotCopyGrantNotFoundFault(String), } impl DeleteSnapshotCopyGrantError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "InvalidSnapshotCopyGrantStateFault" => { return RusotoError::Service( DeleteSnapshotCopyGrantError::InvalidSnapshotCopyGrantStateFault( parsed_error.message, ), ) } "SnapshotCopyGrantNotFoundFault" => { return RusotoError::Service( DeleteSnapshotCopyGrantError::SnapshotCopyGrantNotFoundFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DeleteSnapshotCopyGrantError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DeleteSnapshotCopyGrantError::InvalidSnapshotCopyGrantStateFault(ref cause) => { write!(f, "{}", cause) } DeleteSnapshotCopyGrantError::SnapshotCopyGrantNotFoundFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DeleteSnapshotCopyGrantError {} /// Errors returned by DeleteSnapshotSchedule #[derive(Debug, PartialEq)] pub enum DeleteSnapshotScheduleError { ///

The cluster snapshot schedule state is not valid.

InvalidClusterSnapshotScheduleStateFault(String), ///

We could not find the specified snapshot schedule.

SnapshotScheduleNotFoundFault(String), } impl DeleteSnapshotScheduleError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "InvalidClusterSnapshotScheduleState" => { return RusotoError::Service( DeleteSnapshotScheduleError::InvalidClusterSnapshotScheduleStateFault( parsed_error.message, ), ) } "SnapshotScheduleNotFound" => { return RusotoError::Service( DeleteSnapshotScheduleError::SnapshotScheduleNotFoundFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DeleteSnapshotScheduleError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DeleteSnapshotScheduleError::InvalidClusterSnapshotScheduleStateFault(ref cause) => { write!(f, "{}", cause) } DeleteSnapshotScheduleError::SnapshotScheduleNotFoundFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DeleteSnapshotScheduleError {} /// Errors returned by DeleteTags #[derive(Debug, PartialEq)] pub enum DeleteTagsError { ///

The tag is invalid.

InvalidTagFault(String), ///

The resource could not be found.

ResourceNotFoundFault(String), } impl DeleteTagsError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "InvalidTagFault" => { return RusotoError::Service(DeleteTagsError::InvalidTagFault( parsed_error.message, )) } "ResourceNotFoundFault" => { return RusotoError::Service(DeleteTagsError::ResourceNotFoundFault( parsed_error.message, )) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DeleteTagsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DeleteTagsError::InvalidTagFault(ref cause) => write!(f, "{}", cause), DeleteTagsError::ResourceNotFoundFault(ref cause) => write!(f, "{}", cause), } } } impl Error for DeleteTagsError {} /// Errors returned by DeleteUsageLimit #[derive(Debug, PartialEq)] pub enum DeleteUsageLimitError { ///

The requested operation isn't supported.

UnsupportedOperationFault(String), ///

The usage limit identifier can't be found.

UsageLimitNotFoundFault(String), } impl DeleteUsageLimitError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "UnsupportedOperation" => { return RusotoError::Service( DeleteUsageLimitError::UnsupportedOperationFault(parsed_error.message), ) } "UsageLimitNotFound" => { return RusotoError::Service( DeleteUsageLimitError::UsageLimitNotFoundFault(parsed_error.message), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DeleteUsageLimitError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DeleteUsageLimitError::UnsupportedOperationFault(ref cause) => write!(f, "{}", cause), DeleteUsageLimitError::UsageLimitNotFoundFault(ref cause) => write!(f, "{}", cause), } } } impl Error for DeleteUsageLimitError {} /// Errors returned by DescribeAccountAttributes #[derive(Debug, PartialEq)] pub enum DescribeAccountAttributesError {} impl DescribeAccountAttributesError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] 
{ _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeAccountAttributesError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self {} } } impl Error for DescribeAccountAttributesError {} /// Errors returned by DescribeClusterDbRevisions #[derive(Debug, PartialEq)] pub enum DescribeClusterDbRevisionsError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The specified cluster is not in the available state.

InvalidClusterStateFault(String), } impl DescribeClusterDbRevisionsError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service( DescribeClusterDbRevisionsError::ClusterNotFoundFault( parsed_error.message, ), ) } "InvalidClusterState" => { return RusotoError::Service( DescribeClusterDbRevisionsError::InvalidClusterStateFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeClusterDbRevisionsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeClusterDbRevisionsError::ClusterNotFoundFault(ref cause) => { write!(f, "{}", cause) } DescribeClusterDbRevisionsError::InvalidClusterStateFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DescribeClusterDbRevisionsError {} /// Errors returned by DescribeClusterParameterGroups #[derive(Debug, PartialEq)] pub enum DescribeClusterParameterGroupsError { ///

The parameter group name does not refer to an existing parameter group.

ClusterParameterGroupNotFoundFault(String), ///

The tag is invalid.

InvalidTagFault(String), } impl DescribeClusterParameterGroupsError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterParameterGroupNotFound" => { return RusotoError::Service( DescribeClusterParameterGroupsError::ClusterParameterGroupNotFoundFault( parsed_error.message, ), ) } "InvalidTagFault" => { return RusotoError::Service( DescribeClusterParameterGroupsError::InvalidTagFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeClusterParameterGroupsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeClusterParameterGroupsError::ClusterParameterGroupNotFoundFault(ref cause) => { write!(f, "{}", cause) } DescribeClusterParameterGroupsError::InvalidTagFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DescribeClusterParameterGroupsError {} /// Errors returned by DescribeClusterParameters #[derive(Debug, PartialEq)] pub enum DescribeClusterParametersError { ///

The parameter group name does not refer to an existing parameter group.

ClusterParameterGroupNotFoundFault(String), } impl DescribeClusterParametersError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterParameterGroupNotFound" => { return RusotoError::Service( DescribeClusterParametersError::ClusterParameterGroupNotFoundFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeClusterParametersError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeClusterParametersError::ClusterParameterGroupNotFoundFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DescribeClusterParametersError {} /// Errors returned by DescribeClusterSecurityGroups #[derive(Debug, PartialEq)] pub enum DescribeClusterSecurityGroupsError { ///

The cluster security group name does not refer to an existing cluster security group.

ClusterSecurityGroupNotFoundFault(String), ///

The tag is invalid.

InvalidTagFault(String), } impl DescribeClusterSecurityGroupsError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterSecurityGroupNotFound" => { return RusotoError::Service( DescribeClusterSecurityGroupsError::ClusterSecurityGroupNotFoundFault( parsed_error.message, ), ) } "InvalidTagFault" => { return RusotoError::Service( DescribeClusterSecurityGroupsError::InvalidTagFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeClusterSecurityGroupsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeClusterSecurityGroupsError::ClusterSecurityGroupNotFoundFault(ref cause) => { write!(f, "{}", cause) } DescribeClusterSecurityGroupsError::InvalidTagFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DescribeClusterSecurityGroupsError {} /// Errors returned by DescribeClusterSnapshots #[derive(Debug, PartialEq)] pub enum DescribeClusterSnapshotsError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The snapshot identifier does not refer to an existing cluster snapshot.

ClusterSnapshotNotFoundFault(String), ///

The tag is invalid.

InvalidTagFault(String), } impl DescribeClusterSnapshotsError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service( DescribeClusterSnapshotsError::ClusterNotFoundFault( parsed_error.message, ), ) } "ClusterSnapshotNotFound" => { return RusotoError::Service( DescribeClusterSnapshotsError::ClusterSnapshotNotFoundFault( parsed_error.message, ), ) } "InvalidTagFault" => { return RusotoError::Service( DescribeClusterSnapshotsError::InvalidTagFault(parsed_error.message), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeClusterSnapshotsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeClusterSnapshotsError::ClusterNotFoundFault(ref cause) => { write!(f, "{}", cause) } DescribeClusterSnapshotsError::ClusterSnapshotNotFoundFault(ref cause) => { write!(f, "{}", cause) } DescribeClusterSnapshotsError::InvalidTagFault(ref cause) => write!(f, "{}", cause), } } } impl Error for DescribeClusterSnapshotsError {} /// Errors returned by DescribeClusterSubnetGroups #[derive(Debug, PartialEq)] pub enum DescribeClusterSubnetGroupsError { ///

The cluster subnet group name does not refer to an existing cluster subnet group.

ClusterSubnetGroupNotFoundFault(String), ///

The tag is invalid.

InvalidTagFault(String), } impl DescribeClusterSubnetGroupsError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterSubnetGroupNotFoundFault" => { return RusotoError::Service( DescribeClusterSubnetGroupsError::ClusterSubnetGroupNotFoundFault( parsed_error.message, ), ) } "InvalidTagFault" => { return RusotoError::Service( DescribeClusterSubnetGroupsError::InvalidTagFault(parsed_error.message), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeClusterSubnetGroupsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeClusterSubnetGroupsError::ClusterSubnetGroupNotFoundFault(ref cause) => { write!(f, "{}", cause) } DescribeClusterSubnetGroupsError::InvalidTagFault(ref cause) => write!(f, "{}", cause), } } } impl Error for DescribeClusterSubnetGroupsError {} /// Errors returned by DescribeClusterTracks #[derive(Debug, PartialEq)] pub enum DescribeClusterTracksError { ///

The provided cluster track name is not valid.

InvalidClusterTrackFault(String), ///

Your account is not authorized to perform the requested operation.

UnauthorizedOperation(String), } impl DescribeClusterTracksError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "InvalidClusterTrack" => { return RusotoError::Service( DescribeClusterTracksError::InvalidClusterTrackFault( parsed_error.message, ), ) } "UnauthorizedOperation" => { return RusotoError::Service( DescribeClusterTracksError::UnauthorizedOperation(parsed_error.message), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeClusterTracksError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeClusterTracksError::InvalidClusterTrackFault(ref cause) => { write!(f, "{}", cause) } DescribeClusterTracksError::UnauthorizedOperation(ref cause) => write!(f, "{}", cause), } } } impl Error for DescribeClusterTracksError {} /// Errors returned by DescribeClusterVersions #[derive(Debug, PartialEq)] pub enum DescribeClusterVersionsError {} impl DescribeClusterVersionsError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] 
{ _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeClusterVersionsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self {} } } impl Error for DescribeClusterVersionsError {} /// Errors returned by DescribeClusters #[derive(Debug, PartialEq)] pub enum DescribeClustersError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The tag is invalid.

InvalidTagFault(String), } impl DescribeClustersError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service(DescribeClustersError::ClusterNotFoundFault( parsed_error.message, )) } "InvalidTagFault" => { return RusotoError::Service(DescribeClustersError::InvalidTagFault( parsed_error.message, )) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeClustersError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeClustersError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), DescribeClustersError::InvalidTagFault(ref cause) => write!(f, "{}", cause), } } } impl Error for DescribeClustersError {} /// Errors returned by DescribeDefaultClusterParameters #[derive(Debug, PartialEq)] pub enum DescribeDefaultClusterParametersError {} impl DescribeDefaultClusterParametersError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] 
{ _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeDefaultClusterParametersError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self {} } } impl Error for DescribeDefaultClusterParametersError {} /// Errors returned by DescribeEventCategories #[derive(Debug, PartialEq)] pub enum DescribeEventCategoriesError {} impl DescribeEventCategoriesError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeEventCategoriesError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self {} } } impl Error for DescribeEventCategoriesError {} /// Errors returned by DescribeEventSubscriptions #[derive(Debug, PartialEq)] pub enum DescribeEventSubscriptionsError { ///

The tag is invalid.

InvalidTagFault(String), ///

An Amazon Redshift event notification subscription with the specified name does not exist.

SubscriptionNotFoundFault(String), } impl DescribeEventSubscriptionsError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "InvalidTagFault" => { return RusotoError::Service( DescribeEventSubscriptionsError::InvalidTagFault(parsed_error.message), ) } "SubscriptionNotFound" => { return RusotoError::Service( DescribeEventSubscriptionsError::SubscriptionNotFoundFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeEventSubscriptionsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeEventSubscriptionsError::InvalidTagFault(ref cause) => write!(f, "{}", cause), DescribeEventSubscriptionsError::SubscriptionNotFoundFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DescribeEventSubscriptionsError {} /// Errors returned by DescribeEvents #[derive(Debug, PartialEq)] pub enum DescribeEventsError {} impl DescribeEventsError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] 
{ _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeEventsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self {} } } impl Error for DescribeEventsError {} /// Errors returned by DescribeHsmClientCertificates #[derive(Debug, PartialEq)] pub enum DescribeHsmClientCertificatesError { ///

There is no Amazon Redshift HSM client certificate with the specified identifier.

HsmClientCertificateNotFoundFault(String), ///

The tag is invalid.

InvalidTagFault(String), } impl DescribeHsmClientCertificatesError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "HsmClientCertificateNotFoundFault" => { return RusotoError::Service( DescribeHsmClientCertificatesError::HsmClientCertificateNotFoundFault( parsed_error.message, ), ) } "InvalidTagFault" => { return RusotoError::Service( DescribeHsmClientCertificatesError::InvalidTagFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeHsmClientCertificatesError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeHsmClientCertificatesError::HsmClientCertificateNotFoundFault(ref cause) => { write!(f, "{}", cause) } DescribeHsmClientCertificatesError::InvalidTagFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DescribeHsmClientCertificatesError {} /// Errors returned by DescribeHsmConfigurations #[derive(Debug, PartialEq)] pub enum DescribeHsmConfigurationsError { ///

/// <p>There is no Amazon Redshift HSM configuration with the specified identifier.</p>

HsmConfigurationNotFoundFault(String), ///

/// <p>The tag is invalid.</p>

InvalidTagFault(String), } impl DescribeHsmConfigurationsError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "HsmConfigurationNotFoundFault" => { return RusotoError::Service( DescribeHsmConfigurationsError::HsmConfigurationNotFoundFault( parsed_error.message, ), ) } "InvalidTagFault" => { return RusotoError::Service( DescribeHsmConfigurationsError::InvalidTagFault(parsed_error.message), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeHsmConfigurationsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeHsmConfigurationsError::HsmConfigurationNotFoundFault(ref cause) => { write!(f, "{}", cause) } DescribeHsmConfigurationsError::InvalidTagFault(ref cause) => write!(f, "{}", cause), } } } impl Error for DescribeHsmConfigurationsError {} /// Errors returned by DescribeLoggingStatus #[derive(Debug, PartialEq)] pub enum DescribeLoggingStatusError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), } impl DescribeLoggingStatusError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service( DescribeLoggingStatusError::ClusterNotFoundFault(parsed_error.message), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeLoggingStatusError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeLoggingStatusError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), } } } impl Error for DescribeLoggingStatusError {} /// Errors returned by DescribeNodeConfigurationOptions #[derive(Debug, PartialEq)] pub enum DescribeNodeConfigurationOptionsError { ///

/// <p>The owner of the specified snapshot has not authorized your account to access the snapshot.</p>

AccessToSnapshotDeniedFault(String), ///

/// <p>The ClusterIdentifier parameter does not refer to an existing cluster.</p>

ClusterNotFoundFault(String), ///

/// <p>The snapshot identifier does not refer to an existing cluster snapshot.</p>

ClusterSnapshotNotFoundFault(String), ///

/// <p>The specified cluster snapshot is not in the available state, or other accounts are authorized to access the snapshot.</p>

InvalidClusterSnapshotStateFault(String), } impl DescribeNodeConfigurationOptionsError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "AccessToSnapshotDenied" => { return RusotoError::Service( DescribeNodeConfigurationOptionsError::AccessToSnapshotDeniedFault( parsed_error.message, ), ) } "ClusterNotFound" => { return RusotoError::Service( DescribeNodeConfigurationOptionsError::ClusterNotFoundFault( parsed_error.message, ), ) } "ClusterSnapshotNotFound" => { return RusotoError::Service( DescribeNodeConfigurationOptionsError::ClusterSnapshotNotFoundFault( parsed_error.message, ), ) } "InvalidClusterSnapshotState" => { return RusotoError::Service( DescribeNodeConfigurationOptionsError::InvalidClusterSnapshotStateFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeNodeConfigurationOptionsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeNodeConfigurationOptionsError::AccessToSnapshotDeniedFault(ref cause) => { write!(f, "{}", cause) } DescribeNodeConfigurationOptionsError::ClusterNotFoundFault(ref cause) => { write!(f, "{}", cause) } DescribeNodeConfigurationOptionsError::ClusterSnapshotNotFoundFault(ref cause) => { write!(f, "{}", cause) } DescribeNodeConfigurationOptionsError::InvalidClusterSnapshotStateFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DescribeNodeConfigurationOptionsError {} /// Errors returned by DescribeOrderableClusterOptions #[derive(Debug, PartialEq)] pub enum 
DescribeOrderableClusterOptionsError {} impl DescribeOrderableClusterOptionsError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeOrderableClusterOptionsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self {} } } impl Error for DescribeOrderableClusterOptionsError {} /// Errors returned by DescribeReservedNodeOfferings #[derive(Debug, PartialEq)] pub enum DescribeReservedNodeOfferingsError { ///

/// <p>Your request cannot be completed because a dependent internal service is temporarily unavailable. Wait 30 to 60 seconds and try again.</p>

DependentServiceUnavailableFault(String), ///

/// <p>Specified offering does not exist.</p>

ReservedNodeOfferingNotFoundFault(String), ///

/// <p>The requested operation isn't supported.</p>

UnsupportedOperationFault(String), } impl DescribeReservedNodeOfferingsError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "DependentServiceUnavailableFault" => { return RusotoError::Service( DescribeReservedNodeOfferingsError::DependentServiceUnavailableFault( parsed_error.message, ), ) } "ReservedNodeOfferingNotFound" => { return RusotoError::Service( DescribeReservedNodeOfferingsError::ReservedNodeOfferingNotFoundFault( parsed_error.message, ), ) } "UnsupportedOperation" => { return RusotoError::Service( DescribeReservedNodeOfferingsError::UnsupportedOperationFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeReservedNodeOfferingsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeReservedNodeOfferingsError::DependentServiceUnavailableFault(ref cause) => { write!(f, "{}", cause) } DescribeReservedNodeOfferingsError::ReservedNodeOfferingNotFoundFault(ref cause) => { write!(f, "{}", cause) } DescribeReservedNodeOfferingsError::UnsupportedOperationFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DescribeReservedNodeOfferingsError {} /// Errors returned by DescribeReservedNodes #[derive(Debug, PartialEq)] pub enum DescribeReservedNodesError { ///

Your request cannot be completed because a dependent internal service is temporarily unavailable. Wait 30 to 60 seconds and try again.

DependentServiceUnavailableFault(String), ///

The specified reserved compute node not found.

ReservedNodeNotFoundFault(String), } impl DescribeReservedNodesError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "DependentServiceUnavailableFault" => { return RusotoError::Service( DescribeReservedNodesError::DependentServiceUnavailableFault( parsed_error.message, ), ) } "ReservedNodeNotFound" => { return RusotoError::Service( DescribeReservedNodesError::ReservedNodeNotFoundFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeReservedNodesError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeReservedNodesError::DependentServiceUnavailableFault(ref cause) => { write!(f, "{}", cause) } DescribeReservedNodesError::ReservedNodeNotFoundFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DescribeReservedNodesError {} /// Errors returned by DescribeResize #[derive(Debug, PartialEq)] pub enum DescribeResizeError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

A resize operation for the specified cluster is not found.

ResizeNotFoundFault(String), } impl DescribeResizeError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service(DescribeResizeError::ClusterNotFoundFault( parsed_error.message, )) } "ResizeNotFound" => { return RusotoError::Service(DescribeResizeError::ResizeNotFoundFault( parsed_error.message, )) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeResizeError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeResizeError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), DescribeResizeError::ResizeNotFoundFault(ref cause) => write!(f, "{}", cause), } } } impl Error for DescribeResizeError {} /// Errors returned by DescribeScheduledActions #[derive(Debug, PartialEq)] pub enum DescribeScheduledActionsError { ///

The scheduled action cannot be found.

ScheduledActionNotFoundFault(String), ///

Your account is not authorized to perform the requested operation.

UnauthorizedOperation(String), } impl DescribeScheduledActionsError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ScheduledActionNotFound" => { return RusotoError::Service( DescribeScheduledActionsError::ScheduledActionNotFoundFault( parsed_error.message, ), ) } "UnauthorizedOperation" => { return RusotoError::Service( DescribeScheduledActionsError::UnauthorizedOperation( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeScheduledActionsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeScheduledActionsError::ScheduledActionNotFoundFault(ref cause) => { write!(f, "{}", cause) } DescribeScheduledActionsError::UnauthorizedOperation(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DescribeScheduledActionsError {} /// Errors returned by DescribeSnapshotCopyGrants #[derive(Debug, PartialEq)] pub enum DescribeSnapshotCopyGrantsError { ///

/// <p>The tag is invalid.</p>

InvalidTagFault(String), ///

/// <p>The specified snapshot copy grant can't be found. Make sure that the name is typed correctly and that the grant exists in the destination region.</p>

SnapshotCopyGrantNotFoundFault(String), } impl DescribeSnapshotCopyGrantsError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "InvalidTagFault" => { return RusotoError::Service( DescribeSnapshotCopyGrantsError::InvalidTagFault(parsed_error.message), ) } "SnapshotCopyGrantNotFoundFault" => { return RusotoError::Service( DescribeSnapshotCopyGrantsError::SnapshotCopyGrantNotFoundFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeSnapshotCopyGrantsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeSnapshotCopyGrantsError::InvalidTagFault(ref cause) => write!(f, "{}", cause), DescribeSnapshotCopyGrantsError::SnapshotCopyGrantNotFoundFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DescribeSnapshotCopyGrantsError {} /// Errors returned by DescribeSnapshotSchedules #[derive(Debug, PartialEq)] pub enum DescribeSnapshotSchedulesError {} impl DescribeSnapshotSchedulesError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] 
{ _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeSnapshotSchedulesError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self {} } } impl Error for DescribeSnapshotSchedulesError {} /// Errors returned by DescribeStorage #[derive(Debug, PartialEq)] pub enum DescribeStorageError {} impl DescribeStorageError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeStorageError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self {} } } impl Error for DescribeStorageError {} /// Errors returned by DescribeTableRestoreStatus #[derive(Debug, PartialEq)] pub enum DescribeTableRestoreStatusError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The specified TableRestoreRequestId value was not found.

TableRestoreNotFoundFault(String), } impl DescribeTableRestoreStatusError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service( DescribeTableRestoreStatusError::ClusterNotFoundFault( parsed_error.message, ), ) } "TableRestoreNotFoundFault" => { return RusotoError::Service( DescribeTableRestoreStatusError::TableRestoreNotFoundFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeTableRestoreStatusError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeTableRestoreStatusError::ClusterNotFoundFault(ref cause) => { write!(f, "{}", cause) } DescribeTableRestoreStatusError::TableRestoreNotFoundFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DescribeTableRestoreStatusError {} /// Errors returned by DescribeTags #[derive(Debug, PartialEq)] pub enum DescribeTagsError { ///

The tag is invalid.

InvalidTagFault(String), ///

The resource could not be found.

ResourceNotFoundFault(String), } impl DescribeTagsError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "InvalidTagFault" => { return RusotoError::Service(DescribeTagsError::InvalidTagFault( parsed_error.message, )) } "ResourceNotFoundFault" => { return RusotoError::Service(DescribeTagsError::ResourceNotFoundFault( parsed_error.message, )) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeTagsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeTagsError::InvalidTagFault(ref cause) => write!(f, "{}", cause), DescribeTagsError::ResourceNotFoundFault(ref cause) => write!(f, "{}", cause), } } } impl Error for DescribeTagsError {} /// Errors returned by DescribeUsageLimits #[derive(Debug, PartialEq)] pub enum DescribeUsageLimitsError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The requested operation isn't supported.

UnsupportedOperationFault(String), } impl DescribeUsageLimitsError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service( DescribeUsageLimitsError::ClusterNotFoundFault(parsed_error.message), ) } "UnsupportedOperation" => { return RusotoError::Service( DescribeUsageLimitsError::UnsupportedOperationFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DescribeUsageLimitsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DescribeUsageLimitsError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), DescribeUsageLimitsError::UnsupportedOperationFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for DescribeUsageLimitsError {} /// Errors returned by DisableLogging #[derive(Debug, PartialEq)] pub enum DisableLoggingError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), } impl DisableLoggingError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service(DisableLoggingError::ClusterNotFoundFault( parsed_error.message, )) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DisableLoggingError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DisableLoggingError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), } } } impl Error for DisableLoggingError {} /// Errors returned by DisableSnapshotCopy #[derive(Debug, PartialEq)] pub enum DisableSnapshotCopyError { ///

/// <p>The ClusterIdentifier parameter does not refer to an existing cluster.</p>

ClusterNotFoundFault(String), ///

/// <p>The specified cluster is not in the available state.</p>

InvalidClusterStateFault(String), ///

/// <p>The cluster already has cross-region snapshot copy disabled.</p>

SnapshotCopyAlreadyDisabledFault(String), ///

/// <p>Your account is not authorized to perform the requested operation.</p>

UnauthorizedOperation(String), } impl DisableSnapshotCopyError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service( DisableSnapshotCopyError::ClusterNotFoundFault(parsed_error.message), ) } "InvalidClusterState" => { return RusotoError::Service( DisableSnapshotCopyError::InvalidClusterStateFault( parsed_error.message, ), ) } "SnapshotCopyAlreadyDisabledFault" => { return RusotoError::Service( DisableSnapshotCopyError::SnapshotCopyAlreadyDisabledFault( parsed_error.message, ), ) } "UnauthorizedOperation" => { return RusotoError::Service( DisableSnapshotCopyError::UnauthorizedOperation(parsed_error.message), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for DisableSnapshotCopyError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { DisableSnapshotCopyError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), DisableSnapshotCopyError::InvalidClusterStateFault(ref cause) => write!(f, "{}", cause), DisableSnapshotCopyError::SnapshotCopyAlreadyDisabledFault(ref cause) => { write!(f, "{}", cause) } DisableSnapshotCopyError::UnauthorizedOperation(ref cause) => write!(f, "{}", cause), } } } impl Error for DisableSnapshotCopyError {} /// Errors returned by EnableLogging #[derive(Debug, PartialEq)] pub enum EnableLoggingError { ///

/// <p>Could not find the specified S3 bucket.</p>

BucketNotFoundFault(String), ///

/// <p>The ClusterIdentifier parameter does not refer to an existing cluster.</p>

ClusterNotFoundFault(String), ///

/// <p>The cluster does not have read bucket or put object permissions on the S3 bucket specified when enabling logging.</p>

InsufficientS3BucketPolicyFault(String), ///

/// <p>The specified cluster is not in the available state.</p>

InvalidClusterStateFault(String), ///

/// <p>The S3 bucket name is invalid. For more information about naming rules, go to Bucket Restrictions and Limitations in the Amazon Simple Storage Service (S3) Developer Guide.</p>

InvalidS3BucketNameFault(String), ///

/// <p>The string specified for the logging S3 key prefix does not comply with the documented constraints.</p>

InvalidS3KeyPrefixFault(String), } impl EnableLoggingError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "BucketNotFoundFault" => { return RusotoError::Service(EnableLoggingError::BucketNotFoundFault( parsed_error.message, )) } "ClusterNotFound" => { return RusotoError::Service(EnableLoggingError::ClusterNotFoundFault( parsed_error.message, )) } "InsufficientS3BucketPolicyFault" => { return RusotoError::Service( EnableLoggingError::InsufficientS3BucketPolicyFault( parsed_error.message, ), ) } "InvalidClusterState" => { return RusotoError::Service(EnableLoggingError::InvalidClusterStateFault( parsed_error.message, )) } "InvalidS3BucketNameFault" => { return RusotoError::Service(EnableLoggingError::InvalidS3BucketNameFault( parsed_error.message, )) } "InvalidS3KeyPrefixFault" => { return RusotoError::Service(EnableLoggingError::InvalidS3KeyPrefixFault( parsed_error.message, )) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for EnableLoggingError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { EnableLoggingError::BucketNotFoundFault(ref cause) => write!(f, "{}", cause), EnableLoggingError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), EnableLoggingError::InsufficientS3BucketPolicyFault(ref cause) => { write!(f, "{}", cause) } EnableLoggingError::InvalidClusterStateFault(ref cause) => write!(f, "{}", cause), EnableLoggingError::InvalidS3BucketNameFault(ref cause) => write!(f, "{}", cause), EnableLoggingError::InvalidS3KeyPrefixFault(ref cause) => write!(f, "{}", cause), } } 
} impl Error for EnableLoggingError {} /// Errors returned by EnableSnapshotCopy #[derive(Debug, PartialEq)] pub enum EnableSnapshotCopyError { ///

/// <p>The ClusterIdentifier parameter does not refer to an existing cluster.</p>

ClusterNotFoundFault(String), ///

/// <p>Cross-region snapshot copy was temporarily disabled. Try your request again.</p>

CopyToRegionDisabledFault(String), ///

/// <p>The request cannot be completed because a dependent service is throttling requests made by Amazon Redshift on your behalf. Wait and retry the request.</p>

DependentServiceRequestThrottlingFault(String), ///

/// <p>The specified options are incompatible.</p>

IncompatibleOrderableOptions(String), ///

/// <p>The specified cluster is not in the available state.</p>

InvalidClusterStateFault(String), ///

/// <p>The retention period specified is either in the past or is not a valid value.</p>

/// <p>The value must be either -1 or an integer between 1 and 3,653.</p>

InvalidRetentionPeriodFault(String), ///

/// <p>The encryption key has exceeded its grant limit in AWS KMS.</p>

LimitExceededFault(String), ///

/// <p>The cluster already has cross-region snapshot copy enabled.</p>

SnapshotCopyAlreadyEnabledFault(String), ///

/// <p>The specified snapshot copy grant can't be found. Make sure that the name is typed correctly and that the grant exists in the destination region.</p>

SnapshotCopyGrantNotFoundFault(String), ///

/// <p>Your account is not authorized to perform the requested operation.</p>

UnauthorizedOperation(String), ///

/// <p>The specified region is incorrect or does not exist.</p>

UnknownSnapshotCopyRegionFault(String), } impl EnableSnapshotCopyError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service(EnableSnapshotCopyError::ClusterNotFoundFault( parsed_error.message, )) } "CopyToRegionDisabledFault" => { return RusotoError::Service( EnableSnapshotCopyError::CopyToRegionDisabledFault( parsed_error.message, ), ) } "DependentServiceRequestThrottlingFault" => { return RusotoError::Service( EnableSnapshotCopyError::DependentServiceRequestThrottlingFault( parsed_error.message, ), ) } "IncompatibleOrderableOptions" => { return RusotoError::Service( EnableSnapshotCopyError::IncompatibleOrderableOptions( parsed_error.message, ), ) } "InvalidClusterState" => { return RusotoError::Service( EnableSnapshotCopyError::InvalidClusterStateFault(parsed_error.message), ) } "InvalidRetentionPeriodFault" => { return RusotoError::Service( EnableSnapshotCopyError::InvalidRetentionPeriodFault( parsed_error.message, ), ) } "LimitExceededFault" => { return RusotoError::Service(EnableSnapshotCopyError::LimitExceededFault( parsed_error.message, )) } "SnapshotCopyAlreadyEnabledFault" => { return RusotoError::Service( EnableSnapshotCopyError::SnapshotCopyAlreadyEnabledFault( parsed_error.message, ), ) } "SnapshotCopyGrantNotFoundFault" => { return RusotoError::Service( EnableSnapshotCopyError::SnapshotCopyGrantNotFoundFault( parsed_error.message, ), ) } "UnauthorizedOperation" => { return RusotoError::Service( EnableSnapshotCopyError::UnauthorizedOperation(parsed_error.message), ) } "UnknownSnapshotCopyRegionFault" => { return RusotoError::Service( EnableSnapshotCopyError::UnknownSnapshotCopyRegionFault( parsed_error.message, ), ) } _ => {} } } } 
RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for EnableSnapshotCopyError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { EnableSnapshotCopyError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), EnableSnapshotCopyError::CopyToRegionDisabledFault(ref cause) => write!(f, "{}", cause), EnableSnapshotCopyError::DependentServiceRequestThrottlingFault(ref cause) => { write!(f, "{}", cause) } EnableSnapshotCopyError::IncompatibleOrderableOptions(ref cause) => { write!(f, "{}", cause) } EnableSnapshotCopyError::InvalidClusterStateFault(ref cause) => write!(f, "{}", cause), EnableSnapshotCopyError::InvalidRetentionPeriodFault(ref cause) => { write!(f, "{}", cause) } EnableSnapshotCopyError::LimitExceededFault(ref cause) => write!(f, "{}", cause), EnableSnapshotCopyError::SnapshotCopyAlreadyEnabledFault(ref cause) => { write!(f, "{}", cause) } EnableSnapshotCopyError::SnapshotCopyGrantNotFoundFault(ref cause) => { write!(f, "{}", cause) } EnableSnapshotCopyError::UnauthorizedOperation(ref cause) => write!(f, "{}", cause), EnableSnapshotCopyError::UnknownSnapshotCopyRegionFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for EnableSnapshotCopyError {} /// Errors returned by GetClusterCredentials #[derive(Debug, PartialEq)] pub enum GetClusterCredentialsError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The requested operation isn't supported.

UnsupportedOperationFault(String), } impl GetClusterCredentialsError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service( GetClusterCredentialsError::ClusterNotFoundFault(parsed_error.message), ) } "UnsupportedOperation" => { return RusotoError::Service( GetClusterCredentialsError::UnsupportedOperationFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for GetClusterCredentialsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { GetClusterCredentialsError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), GetClusterCredentialsError::UnsupportedOperationFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for GetClusterCredentialsError {} /// Errors returned by GetReservedNodeExchangeOfferings #[derive(Debug, PartialEq)] pub enum GetReservedNodeExchangeOfferingsError { ///

Your request cannot be completed because a dependent internal service is temporarily unavailable. Wait 30 to 60 seconds and try again.

DependentServiceUnavailableFault(String), ///

Indicates that the Reserved Node being exchanged is not in an active state.

InvalidReservedNodeStateFault(String), ///

Indicates that the reserved node has already been exchanged.

ReservedNodeAlreadyMigratedFault(String), ///

The specified reserved compute node not found.

ReservedNodeNotFoundFault(String), ///

Specified offering does not exist.

ReservedNodeOfferingNotFoundFault(String), ///

The requested operation isn't supported.

UnsupportedOperationFault(String), } impl GetReservedNodeExchangeOfferingsError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "DependentServiceUnavailableFault" => { return RusotoError::Service( GetReservedNodeExchangeOfferingsError::DependentServiceUnavailableFault( parsed_error.message, ), ) } "InvalidReservedNodeState" => { return RusotoError::Service( GetReservedNodeExchangeOfferingsError::InvalidReservedNodeStateFault( parsed_error.message, ), ) } "ReservedNodeAlreadyMigrated" => { return RusotoError::Service( GetReservedNodeExchangeOfferingsError::ReservedNodeAlreadyMigratedFault( parsed_error.message, ), ) } "ReservedNodeNotFound" => { return RusotoError::Service( GetReservedNodeExchangeOfferingsError::ReservedNodeNotFoundFault( parsed_error.message, ), ) } "ReservedNodeOfferingNotFound" => return RusotoError::Service( GetReservedNodeExchangeOfferingsError::ReservedNodeOfferingNotFoundFault( parsed_error.message, ), ), "UnsupportedOperation" => { return RusotoError::Service( GetReservedNodeExchangeOfferingsError::UnsupportedOperationFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for GetReservedNodeExchangeOfferingsError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { GetReservedNodeExchangeOfferingsError::DependentServiceUnavailableFault(ref cause) => { write!(f, "{}", cause) } GetReservedNodeExchangeOfferingsError::InvalidReservedNodeStateFault(ref cause) => { write!(f, "{}", cause) } 
GetReservedNodeExchangeOfferingsError::ReservedNodeAlreadyMigratedFault(ref cause) => { write!(f, "{}", cause) } GetReservedNodeExchangeOfferingsError::ReservedNodeNotFoundFault(ref cause) => { write!(f, "{}", cause) } GetReservedNodeExchangeOfferingsError::ReservedNodeOfferingNotFoundFault(ref cause) => { write!(f, "{}", cause) } GetReservedNodeExchangeOfferingsError::UnsupportedOperationFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for GetReservedNodeExchangeOfferingsError {} /// Errors returned by ModifyCluster #[derive(Debug, PartialEq)] pub enum ModifyClusterError { ///

The account already has a cluster with the given identifier.

ClusterAlreadyExistsFault(String), ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The parameter group name does not refer to an existing parameter group.

ClusterParameterGroupNotFoundFault(String), ///

The cluster security group name does not refer to an existing cluster security group.

ClusterSecurityGroupNotFoundFault(String), ///

The request cannot be completed because a dependent service is throttling requests made by Amazon Redshift on your behalf. Wait and retry the request.

DependentServiceRequestThrottlingFault(String), ///

There is no Amazon Redshift HSM client certificate with the specified identifier.

HsmClientCertificateNotFoundFault(String), ///

There is no Amazon Redshift HSM configuration with the specified identifier.

HsmConfigurationNotFoundFault(String), ///

The number of nodes specified exceeds the allotted capacity of the cluster.

InsufficientClusterCapacityFault(String), ///

The state of the cluster security group is not available.

InvalidClusterSecurityGroupStateFault(String), ///

The specified cluster is not in the available state.

InvalidClusterStateFault(String), ///

The provided cluster track name is not valid.

InvalidClusterTrackFault(String), ///

The Elastic IP (EIP) is invalid or cannot be found.

InvalidElasticIpFault(String), ///

The retention period specified is either in the past or is not a valid value.

The value must be either -1 or an integer between 1 and 3,653.

InvalidRetentionPeriodFault(String), ///

The encryption key has exceeded its grant limit in AWS KMS.

LimitExceededFault(String), ///

The operation would exceed the number of nodes allowed for a cluster.

NumberOfNodesPerClusterLimitExceededFault(String), ///

The operation would exceed the number of nodes allotted to the account. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

NumberOfNodesQuotaExceededFault(String), ///

The number of tables in the cluster exceeds the limit for the requested new cluster node type.

TableLimitExceededFault(String), ///

Your account is not authorized to perform the requested operation.

UnauthorizedOperation(String), ///

A request option was specified that is not supported.

UnsupportedOptionFault(String), } impl ModifyClusterError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterAlreadyExists" => { return RusotoError::Service(ModifyClusterError::ClusterAlreadyExistsFault( parsed_error.message, )) } "ClusterNotFound" => { return RusotoError::Service(ModifyClusterError::ClusterNotFoundFault( parsed_error.message, )) } "ClusterParameterGroupNotFound" => { return RusotoError::Service( ModifyClusterError::ClusterParameterGroupNotFoundFault( parsed_error.message, ), ) } "ClusterSecurityGroupNotFound" => { return RusotoError::Service( ModifyClusterError::ClusterSecurityGroupNotFoundFault( parsed_error.message, ), ) } "DependentServiceRequestThrottlingFault" => { return RusotoError::Service( ModifyClusterError::DependentServiceRequestThrottlingFault( parsed_error.message, ), ) } "HsmClientCertificateNotFoundFault" => { return RusotoError::Service( ModifyClusterError::HsmClientCertificateNotFoundFault( parsed_error.message, ), ) } "HsmConfigurationNotFoundFault" => { return RusotoError::Service( ModifyClusterError::HsmConfigurationNotFoundFault(parsed_error.message), ) } "InsufficientClusterCapacity" => { return RusotoError::Service( ModifyClusterError::InsufficientClusterCapacityFault( parsed_error.message, ), ) } "InvalidClusterSecurityGroupState" => { return RusotoError::Service( ModifyClusterError::InvalidClusterSecurityGroupStateFault( parsed_error.message, ), ) } "InvalidClusterState" => { return RusotoError::Service(ModifyClusterError::InvalidClusterStateFault( parsed_error.message, )) } "InvalidClusterTrack" => { return RusotoError::Service(ModifyClusterError::InvalidClusterTrackFault( parsed_error.message, )) } "InvalidElasticIpFault" => { return 
RusotoError::Service(ModifyClusterError::InvalidElasticIpFault( parsed_error.message, )) } "InvalidRetentionPeriodFault" => { return RusotoError::Service( ModifyClusterError::InvalidRetentionPeriodFault(parsed_error.message), ) } "LimitExceededFault" => { return RusotoError::Service(ModifyClusterError::LimitExceededFault( parsed_error.message, )) } "NumberOfNodesPerClusterLimitExceeded" => { return RusotoError::Service( ModifyClusterError::NumberOfNodesPerClusterLimitExceededFault( parsed_error.message, ), ) } "NumberOfNodesQuotaExceeded" => { return RusotoError::Service( ModifyClusterError::NumberOfNodesQuotaExceededFault( parsed_error.message, ), ) } "TableLimitExceeded" => { return RusotoError::Service(ModifyClusterError::TableLimitExceededFault( parsed_error.message, )) } "UnauthorizedOperation" => { return RusotoError::Service(ModifyClusterError::UnauthorizedOperation( parsed_error.message, )) } "UnsupportedOptionFault" => { return RusotoError::Service(ModifyClusterError::UnsupportedOptionFault( parsed_error.message, )) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ModifyClusterError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ModifyClusterError::ClusterAlreadyExistsFault(ref cause) => write!(f, "{}", cause), ModifyClusterError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), ModifyClusterError::ClusterParameterGroupNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterError::ClusterSecurityGroupNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterError::DependentServiceRequestThrottlingFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterError::HsmClientCertificateNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterError::HsmConfigurationNotFoundFault(ref 
cause) => write!(f, "{}", cause), ModifyClusterError::InsufficientClusterCapacityFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterError::InvalidClusterSecurityGroupStateFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterError::InvalidClusterStateFault(ref cause) => write!(f, "{}", cause), ModifyClusterError::InvalidClusterTrackFault(ref cause) => write!(f, "{}", cause), ModifyClusterError::InvalidElasticIpFault(ref cause) => write!(f, "{}", cause), ModifyClusterError::InvalidRetentionPeriodFault(ref cause) => write!(f, "{}", cause), ModifyClusterError::LimitExceededFault(ref cause) => write!(f, "{}", cause), ModifyClusterError::NumberOfNodesPerClusterLimitExceededFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterError::NumberOfNodesQuotaExceededFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterError::TableLimitExceededFault(ref cause) => write!(f, "{}", cause), ModifyClusterError::UnauthorizedOperation(ref cause) => write!(f, "{}", cause), ModifyClusterError::UnsupportedOptionFault(ref cause) => write!(f, "{}", cause), } } } impl Error for ModifyClusterError {} /// Errors returned by ModifyClusterDbRevision #[derive(Debug, PartialEq)] pub enum ModifyClusterDbRevisionError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

Cluster is already on the latest database revision.

ClusterOnLatestRevisionFault(String), ///

The specified cluster is not in the available state.

InvalidClusterStateFault(String), } impl ModifyClusterDbRevisionError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service( ModifyClusterDbRevisionError::ClusterNotFoundFault( parsed_error.message, ), ) } "ClusterOnLatestRevision" => { return RusotoError::Service( ModifyClusterDbRevisionError::ClusterOnLatestRevisionFault( parsed_error.message, ), ) } "InvalidClusterState" => { return RusotoError::Service( ModifyClusterDbRevisionError::InvalidClusterStateFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ModifyClusterDbRevisionError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ModifyClusterDbRevisionError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), ModifyClusterDbRevisionError::ClusterOnLatestRevisionFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterDbRevisionError::InvalidClusterStateFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for ModifyClusterDbRevisionError {} /// Errors returned by ModifyClusterIamRoles #[derive(Debug, PartialEq)] pub enum ModifyClusterIamRolesError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The specified cluster is not in the available state.

InvalidClusterStateFault(String), } impl ModifyClusterIamRolesError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service( ModifyClusterIamRolesError::ClusterNotFoundFault(parsed_error.message), ) } "InvalidClusterState" => { return RusotoError::Service( ModifyClusterIamRolesError::InvalidClusterStateFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ModifyClusterIamRolesError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ModifyClusterIamRolesError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), ModifyClusterIamRolesError::InvalidClusterStateFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for ModifyClusterIamRolesError {} /// Errors returned by ModifyClusterMaintenance #[derive(Debug, PartialEq)] pub enum ModifyClusterMaintenanceError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The specified cluster is not in the available state.

InvalidClusterStateFault(String), } impl ModifyClusterMaintenanceError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service( ModifyClusterMaintenanceError::ClusterNotFoundFault( parsed_error.message, ), ) } "InvalidClusterState" => { return RusotoError::Service( ModifyClusterMaintenanceError::InvalidClusterStateFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ModifyClusterMaintenanceError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ModifyClusterMaintenanceError::ClusterNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterMaintenanceError::InvalidClusterStateFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for ModifyClusterMaintenanceError {} /// Errors returned by ModifyClusterParameterGroup #[derive(Debug, PartialEq)] pub enum ModifyClusterParameterGroupError { ///

The parameter group name does not refer to an existing parameter group.

ClusterParameterGroupNotFoundFault(String), ///

The cluster parameter group action can not be completed because another task is in progress that involves the parameter group. Wait a few moments and try the operation again.

InvalidClusterParameterGroupStateFault(String), } impl ModifyClusterParameterGroupError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterParameterGroupNotFound" => { return RusotoError::Service( ModifyClusterParameterGroupError::ClusterParameterGroupNotFoundFault( parsed_error.message, ), ) } "InvalidClusterParameterGroupState" => return RusotoError::Service( ModifyClusterParameterGroupError::InvalidClusterParameterGroupStateFault( parsed_error.message, ), ), _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ModifyClusterParameterGroupError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ModifyClusterParameterGroupError::ClusterParameterGroupNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterParameterGroupError::InvalidClusterParameterGroupStateFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for ModifyClusterParameterGroupError {} /// Errors returned by ModifyClusterSnapshot #[derive(Debug, PartialEq)] pub enum ModifyClusterSnapshotError { ///

The snapshot identifier does not refer to an existing cluster snapshot.

ClusterSnapshotNotFoundFault(String), ///

The specified cluster snapshot is not in the available state, or other accounts are authorized to access the snapshot.

InvalidClusterSnapshotStateFault(String), ///

The retention period specified is either in the past or is not a valid value.

The value must be either -1 or an integer between 1 and 3,653.

InvalidRetentionPeriodFault(String), } impl ModifyClusterSnapshotError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterSnapshotNotFound" => { return RusotoError::Service( ModifyClusterSnapshotError::ClusterSnapshotNotFoundFault( parsed_error.message, ), ) } "InvalidClusterSnapshotState" => { return RusotoError::Service( ModifyClusterSnapshotError::InvalidClusterSnapshotStateFault( parsed_error.message, ), ) } "InvalidRetentionPeriodFault" => { return RusotoError::Service( ModifyClusterSnapshotError::InvalidRetentionPeriodFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ModifyClusterSnapshotError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ModifyClusterSnapshotError::ClusterSnapshotNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterSnapshotError::InvalidClusterSnapshotStateFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterSnapshotError::InvalidRetentionPeriodFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for ModifyClusterSnapshotError {} /// Errors returned by ModifyClusterSnapshotSchedule #[derive(Debug, PartialEq)] pub enum ModifyClusterSnapshotScheduleError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The cluster snapshot schedule state is not valid.

InvalidClusterSnapshotScheduleStateFault(String), ///

We could not find the specified snapshot schedule.

SnapshotScheduleNotFoundFault(String), } impl ModifyClusterSnapshotScheduleError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => return RusotoError::Service(ModifyClusterSnapshotScheduleError::ClusterNotFoundFault(parsed_error.message)),"InvalidClusterSnapshotScheduleState" => return RusotoError::Service(ModifyClusterSnapshotScheduleError::InvalidClusterSnapshotScheduleStateFault(parsed_error.message)),"SnapshotScheduleNotFound" => return RusotoError::Service(ModifyClusterSnapshotScheduleError::SnapshotScheduleNotFoundFault(parsed_error.message)),_ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ModifyClusterSnapshotScheduleError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ModifyClusterSnapshotScheduleError::ClusterNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterSnapshotScheduleError::InvalidClusterSnapshotScheduleStateFault( ref cause, ) => write!(f, "{}", cause), ModifyClusterSnapshotScheduleError::SnapshotScheduleNotFoundFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for ModifyClusterSnapshotScheduleError {} /// Errors returned by ModifyClusterSubnetGroup #[derive(Debug, PartialEq)] pub enum ModifyClusterSubnetGroupError { ///

The cluster subnet group name does not refer to an existing cluster subnet group.

ClusterSubnetGroupNotFoundFault(String), ///

The request would result in user exceeding the allowed number of subnets in a cluster subnet groups. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

ClusterSubnetQuotaExceededFault(String), ///

The request cannot be completed because a dependent service is throttling requests made by Amazon Redshift on your behalf. Wait and retry the request.

DependentServiceRequestThrottlingFault(String), ///

The requested subnet is not valid, or not all of the subnets are in the same VPC.

InvalidSubnet(String), ///

A specified subnet is already in use by another cluster.

SubnetAlreadyInUse(String), ///

Your account is not authorized to perform the requested operation.

UnauthorizedOperation(String), } impl ModifyClusterSubnetGroupError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterSubnetGroupNotFoundFault" => { return RusotoError::Service( ModifyClusterSubnetGroupError::ClusterSubnetGroupNotFoundFault( parsed_error.message, ), ) } "ClusterSubnetQuotaExceededFault" => { return RusotoError::Service( ModifyClusterSubnetGroupError::ClusterSubnetQuotaExceededFault( parsed_error.message, ), ) } "DependentServiceRequestThrottlingFault" => { return RusotoError::Service( ModifyClusterSubnetGroupError::DependentServiceRequestThrottlingFault( parsed_error.message, ), ) } "InvalidSubnet" => { return RusotoError::Service(ModifyClusterSubnetGroupError::InvalidSubnet( parsed_error.message, )) } "SubnetAlreadyInUse" => { return RusotoError::Service( ModifyClusterSubnetGroupError::SubnetAlreadyInUse(parsed_error.message), ) } "UnauthorizedOperation" => { return RusotoError::Service( ModifyClusterSubnetGroupError::UnauthorizedOperation( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ModifyClusterSubnetGroupError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ModifyClusterSubnetGroupError::ClusterSubnetGroupNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterSubnetGroupError::ClusterSubnetQuotaExceededFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterSubnetGroupError::DependentServiceRequestThrottlingFault(ref cause) => { write!(f, "{}", cause) } ModifyClusterSubnetGroupError::InvalidSubnet(ref cause) 
=> write!(f, "{}", cause), ModifyClusterSubnetGroupError::SubnetAlreadyInUse(ref cause) => write!(f, "{}", cause), ModifyClusterSubnetGroupError::UnauthorizedOperation(ref cause) => { write!(f, "{}", cause) } } } } impl Error for ModifyClusterSubnetGroupError {} /// Errors returned by ModifyEventSubscription #[derive(Debug, PartialEq)] pub enum ModifyEventSubscriptionError { ///

The subscription request is invalid because it is a duplicate request. This subscription request is already in progress.

InvalidSubscriptionStateFault(String), ///

Amazon SNS has responded that there is a problem with the specified Amazon SNS topic.

SNSInvalidTopicFault(String), ///

You do not have permission to publish to the specified Amazon SNS topic.

SNSNoAuthorizationFault(String), ///

An Amazon SNS topic with the specified Amazon Resource Name (ARN) does not exist.

SNSTopicArnNotFoundFault(String), ///

The specified Amazon Redshift event source could not be found.

SourceNotFoundFault(String), ///

The value specified for the event category was not one of the allowed values, or it specified a category that does not apply to the specified source type. The allowed values are Configuration, Management, Monitoring, and Security.

SubscriptionCategoryNotFoundFault(String), ///

An Amazon Redshift event with the specified event ID does not exist.

SubscriptionEventIdNotFoundFault(String), ///

An Amazon Redshift event notification subscription with the specified name does not exist.

SubscriptionNotFoundFault(String), ///

The value specified for the event severity was not one of the allowed values, or it specified a severity that does not apply to the specified source type. The allowed values are ERROR and INFO.

SubscriptionSeverityNotFoundFault(String), } impl ModifyEventSubscriptionError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "InvalidSubscriptionStateFault" => { return RusotoError::Service( ModifyEventSubscriptionError::InvalidSubscriptionStateFault( parsed_error.message, ), ) } "SNSInvalidTopic" => { return RusotoError::Service( ModifyEventSubscriptionError::SNSInvalidTopicFault( parsed_error.message, ), ) } "SNSNoAuthorization" => { return RusotoError::Service( ModifyEventSubscriptionError::SNSNoAuthorizationFault( parsed_error.message, ), ) } "SNSTopicArnNotFound" => { return RusotoError::Service( ModifyEventSubscriptionError::SNSTopicArnNotFoundFault( parsed_error.message, ), ) } "SourceNotFound" => { return RusotoError::Service( ModifyEventSubscriptionError::SourceNotFoundFault(parsed_error.message), ) } "SubscriptionCategoryNotFound" => { return RusotoError::Service( ModifyEventSubscriptionError::SubscriptionCategoryNotFoundFault( parsed_error.message, ), ) } "SubscriptionEventIdNotFound" => { return RusotoError::Service( ModifyEventSubscriptionError::SubscriptionEventIdNotFoundFault( parsed_error.message, ), ) } "SubscriptionNotFound" => { return RusotoError::Service( ModifyEventSubscriptionError::SubscriptionNotFoundFault( parsed_error.message, ), ) } "SubscriptionSeverityNotFound" => { return RusotoError::Service( ModifyEventSubscriptionError::SubscriptionSeverityNotFoundFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ModifyEventSubscriptionError { #[allow(unused_variables)] fn 
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ModifyEventSubscriptionError::InvalidSubscriptionStateFault(ref cause) => { write!(f, "{}", cause) } ModifyEventSubscriptionError::SNSInvalidTopicFault(ref cause) => write!(f, "{}", cause), ModifyEventSubscriptionError::SNSNoAuthorizationFault(ref cause) => { write!(f, "{}", cause) } ModifyEventSubscriptionError::SNSTopicArnNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifyEventSubscriptionError::SourceNotFoundFault(ref cause) => write!(f, "{}", cause), ModifyEventSubscriptionError::SubscriptionCategoryNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifyEventSubscriptionError::SubscriptionEventIdNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifyEventSubscriptionError::SubscriptionNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifyEventSubscriptionError::SubscriptionSeverityNotFoundFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for ModifyEventSubscriptionError {} /// Errors returned by ModifyScheduledAction #[derive(Debug, PartialEq)] pub enum ModifyScheduledActionError { ///

The schedule you submitted isn't valid.

InvalidScheduleFault(String), ///

The scheduled action is not valid.

InvalidScheduledActionFault(String), ///

The scheduled action cannot be found.

ScheduledActionNotFoundFault(String), ///

The action type specified for a scheduled action is not supported.

ScheduledActionTypeUnsupportedFault(String), ///

Your account is not authorized to perform the requested operation.

UnauthorizedOperation(String), } impl ModifyScheduledActionError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "InvalidSchedule" => { return RusotoError::Service( ModifyScheduledActionError::InvalidScheduleFault(parsed_error.message), ) } "InvalidScheduledAction" => { return RusotoError::Service( ModifyScheduledActionError::InvalidScheduledActionFault( parsed_error.message, ), ) } "ScheduledActionNotFound" => { return RusotoError::Service( ModifyScheduledActionError::ScheduledActionNotFoundFault( parsed_error.message, ), ) } "ScheduledActionTypeUnsupported" => { return RusotoError::Service( ModifyScheduledActionError::ScheduledActionTypeUnsupportedFault( parsed_error.message, ), ) } "UnauthorizedOperation" => { return RusotoError::Service( ModifyScheduledActionError::UnauthorizedOperation(parsed_error.message), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ModifyScheduledActionError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ModifyScheduledActionError::InvalidScheduleFault(ref cause) => write!(f, "{}", cause), ModifyScheduledActionError::InvalidScheduledActionFault(ref cause) => { write!(f, "{}", cause) } ModifyScheduledActionError::ScheduledActionNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifyScheduledActionError::ScheduledActionTypeUnsupportedFault(ref cause) => { write!(f, "{}", cause) } ModifyScheduledActionError::UnauthorizedOperation(ref cause) => write!(f, "{}", cause), } } } impl Error for ModifyScheduledActionError {} /// Errors returned by 
#[derive(Debug, PartialEq)]
pub enum ModifySnapshotCopyRetentionPeriodError {
    /// <p>The <code>ClusterIdentifier</code> parameter does not refer to an existing cluster.</p>
    ClusterNotFoundFault(String),
    /// <p>The specified cluster is not in the <code>available</code> state.</p>
    InvalidClusterStateFault(String),
    /// <p>The retention period specified is either in the past or is not a valid value.</p> <p>The value must be either -1 or an integer between 1 and 3,653.</p>
    InvalidRetentionPeriodFault(String),
    /// <p>Cross-region snapshot copy was temporarily disabled. Try your request again.</p>
    SnapshotCopyDisabledFault(String),
    /// <p>Your account is not authorized to perform the requested operation.</p>
UnauthorizedOperation(String), } impl ModifySnapshotCopyRetentionPeriodError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service( ModifySnapshotCopyRetentionPeriodError::ClusterNotFoundFault( parsed_error.message, ), ) } "InvalidClusterState" => { return RusotoError::Service( ModifySnapshotCopyRetentionPeriodError::InvalidClusterStateFault( parsed_error.message, ), ) } "InvalidRetentionPeriodFault" => { return RusotoError::Service( ModifySnapshotCopyRetentionPeriodError::InvalidRetentionPeriodFault( parsed_error.message, ), ) } "SnapshotCopyDisabledFault" => { return RusotoError::Service( ModifySnapshotCopyRetentionPeriodError::SnapshotCopyDisabledFault( parsed_error.message, ), ) } "UnauthorizedOperation" => { return RusotoError::Service( ModifySnapshotCopyRetentionPeriodError::UnauthorizedOperation( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ModifySnapshotCopyRetentionPeriodError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ModifySnapshotCopyRetentionPeriodError::ClusterNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifySnapshotCopyRetentionPeriodError::InvalidClusterStateFault(ref cause) => { write!(f, "{}", cause) } ModifySnapshotCopyRetentionPeriodError::InvalidRetentionPeriodFault(ref cause) => { write!(f, "{}", cause) } ModifySnapshotCopyRetentionPeriodError::SnapshotCopyDisabledFault(ref cause) => { write!(f, "{}", cause) } 
ModifySnapshotCopyRetentionPeriodError::UnauthorizedOperation(ref cause) => { write!(f, "{}", cause) } } } } impl Error for ModifySnapshotCopyRetentionPeriodError {} /// Errors returned by ModifySnapshotSchedule #[derive(Debug, PartialEq)] pub enum ModifySnapshotScheduleError { ///

The schedule you submitted isn't valid.

InvalidScheduleFault(String), ///

We could not find the specified snapshot schedule.

SnapshotScheduleNotFoundFault(String), ///

The specified snapshot schedule is already being updated.

SnapshotScheduleUpdateInProgressFault(String), } impl ModifySnapshotScheduleError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "InvalidSchedule" => { return RusotoError::Service( ModifySnapshotScheduleError::InvalidScheduleFault(parsed_error.message), ) } "SnapshotScheduleNotFound" => { return RusotoError::Service( ModifySnapshotScheduleError::SnapshotScheduleNotFoundFault( parsed_error.message, ), ) } "SnapshotScheduleUpdateInProgress" => { return RusotoError::Service( ModifySnapshotScheduleError::SnapshotScheduleUpdateInProgressFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ModifySnapshotScheduleError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ModifySnapshotScheduleError::InvalidScheduleFault(ref cause) => write!(f, "{}", cause), ModifySnapshotScheduleError::SnapshotScheduleNotFoundFault(ref cause) => { write!(f, "{}", cause) } ModifySnapshotScheduleError::SnapshotScheduleUpdateInProgressFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for ModifySnapshotScheduleError {} /// Errors returned by ModifyUsageLimit #[derive(Debug, PartialEq)] pub enum ModifyUsageLimitError { ///

The usage limit is not valid.

InvalidUsageLimitFault(String), ///

The requested operation isn't supported.

UnsupportedOperationFault(String), ///

The usage limit identifier can't be found.

UsageLimitNotFoundFault(String), } impl ModifyUsageLimitError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "InvalidUsageLimit" => { return RusotoError::Service(ModifyUsageLimitError::InvalidUsageLimitFault( parsed_error.message, )) } "UnsupportedOperation" => { return RusotoError::Service( ModifyUsageLimitError::UnsupportedOperationFault(parsed_error.message), ) } "UsageLimitNotFound" => { return RusotoError::Service( ModifyUsageLimitError::UsageLimitNotFoundFault(parsed_error.message), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ModifyUsageLimitError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ModifyUsageLimitError::InvalidUsageLimitFault(ref cause) => write!(f, "{}", cause), ModifyUsageLimitError::UnsupportedOperationFault(ref cause) => write!(f, "{}", cause), ModifyUsageLimitError::UsageLimitNotFoundFault(ref cause) => write!(f, "{}", cause), } } } impl Error for ModifyUsageLimitError {} /// Errors returned by PauseCluster #[derive(Debug, PartialEq)] pub enum PauseClusterError { ///

/// The `ClusterIdentifier` parameter does not refer to an existing cluster.
    ClusterNotFoundFault(String),
    /// The specified cluster is not in the `available` state.
    InvalidClusterStateFault(String),
}

impl PauseClusterError {
    /// Parses an unsuccessful HTTP response body (AWS query-protocol XML) into a
    /// typed service error; unrecognized error codes fall through to `RusotoError::Unknown`.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<PauseClusterError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "ClusterNotFound" => return RusotoError::Service(PauseClusterError::ClusterNotFoundFault(parsed_error.message)),
                    "InvalidClusterState" => return RusotoError::Service(PauseClusterError::InvalidClusterStateFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    /// Reads the `<ErrorResponse><Error>` envelope common to all Redshift errors.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl fmt::Display for PauseClusterError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            PauseClusterError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause),
            PauseClusterError::InvalidClusterStateFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for PauseClusterError {}
/// Errors returned by PurchaseReservedNodeOffering
#[derive(Debug, PartialEq)]
pub enum PurchaseReservedNodeOfferingError {

User already has a reservation with the given identifier.

ReservedNodeAlreadyExistsFault(String), ///

Specified offering does not exist.

ReservedNodeOfferingNotFoundFault(String), ///

Request would exceed the user's compute node quota. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

ReservedNodeQuotaExceededFault(String), ///

The requested operation isn't supported.

UnsupportedOperationFault(String), } impl PurchaseReservedNodeOfferingError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ReservedNodeAlreadyExists" => { return RusotoError::Service( PurchaseReservedNodeOfferingError::ReservedNodeAlreadyExistsFault( parsed_error.message, ), ) } "ReservedNodeOfferingNotFound" => { return RusotoError::Service( PurchaseReservedNodeOfferingError::ReservedNodeOfferingNotFoundFault( parsed_error.message, ), ) } "ReservedNodeQuotaExceeded" => { return RusotoError::Service( PurchaseReservedNodeOfferingError::ReservedNodeQuotaExceededFault( parsed_error.message, ), ) } "UnsupportedOperation" => { return RusotoError::Service( PurchaseReservedNodeOfferingError::UnsupportedOperationFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for PurchaseReservedNodeOfferingError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { PurchaseReservedNodeOfferingError::ReservedNodeAlreadyExistsFault(ref cause) => { write!(f, "{}", cause) } PurchaseReservedNodeOfferingError::ReservedNodeOfferingNotFoundFault(ref cause) => { write!(f, "{}", cause) } PurchaseReservedNodeOfferingError::ReservedNodeQuotaExceededFault(ref cause) => { write!(f, "{}", cause) } PurchaseReservedNodeOfferingError::UnsupportedOperationFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for PurchaseReservedNodeOfferingError {} /// Errors returned by RebootCluster #[derive(Debug, PartialEq)] pub enum RebootClusterError { ///

/// The `ClusterIdentifier` parameter does not refer to an existing cluster.
    ClusterNotFoundFault(String),
    /// The specified cluster is not in the `available` state.
    InvalidClusterStateFault(String),
}

impl RebootClusterError {
    /// Parses an unsuccessful HTTP response body (AWS query-protocol XML) into a
    /// typed service error; unrecognized error codes fall through to `RusotoError::Unknown`.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<RebootClusterError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "ClusterNotFound" => return RusotoError::Service(RebootClusterError::ClusterNotFoundFault(parsed_error.message)),
                    "InvalidClusterState" => return RusotoError::Service(RebootClusterError::InvalidClusterStateFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    /// Reads the `<ErrorResponse><Error>` envelope common to all Redshift errors.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl fmt::Display for RebootClusterError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            RebootClusterError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause),
            RebootClusterError::InvalidClusterStateFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for RebootClusterError {}
/// Errors returned by ResetClusterParameterGroup
#[derive(Debug, PartialEq)]
pub enum ResetClusterParameterGroupError {

/// The parameter group name does not refer to an existing parameter group.
    ClusterParameterGroupNotFoundFault(String),
    /// The cluster parameter group action can not be completed because another task is
    /// in progress that involves the parameter group. Wait a few moments and try the
    /// operation again.
    InvalidClusterParameterGroupStateFault(String),
}

impl ResetClusterParameterGroupError {
    /// Parses an unsuccessful HTTP response body (AWS query-protocol XML) into a
    /// typed service error; unrecognized error codes fall through to `RusotoError::Unknown`.
    pub fn from_response(
        res: BufferedHttpResponse,
    ) -> RusotoError<ResetClusterParameterGroupError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "ClusterParameterGroupNotFound" => return RusotoError::Service(ResetClusterParameterGroupError::ClusterParameterGroupNotFoundFault(parsed_error.message)),
                    "InvalidClusterParameterGroupState" => return RusotoError::Service(ResetClusterParameterGroupError::InvalidClusterParameterGroupStateFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    /// Reads the `<ErrorResponse><Error>` envelope common to all Redshift errors.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl fmt::Display for ResetClusterParameterGroupError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            ResetClusterParameterGroupError::ClusterParameterGroupNotFoundFault(ref cause) => write!(f, "{}", cause),
            ResetClusterParameterGroupError::InvalidClusterParameterGroupStateFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for ResetClusterParameterGroupError {}
/// Errors returned by ResizeCluster
#[derive(Debug, PartialEq)]
pub enum ResizeClusterError {

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The number of nodes specified exceeds the allotted capacity of the cluster.

InsufficientClusterCapacityFault(String), ///

The specified cluster is not in the available state.

InvalidClusterStateFault(String), ///

The encryption key has exceeded its grant limit in AWS KMS.

LimitExceededFault(String), ///

The operation would exceed the number of nodes allowed for a cluster.

NumberOfNodesPerClusterLimitExceededFault(String), ///

The operation would exceed the number of nodes allotted to the account. For information about increasing your quota, go to Limits in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

NumberOfNodesQuotaExceededFault(String), ///

Your account is not authorized to perform the requested operation.

UnauthorizedOperation(String), ///

The requested operation isn't supported.

UnsupportedOperationFault(String), ///

A request option was specified that is not supported.

UnsupportedOptionFault(String), } impl ResizeClusterError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service(ResizeClusterError::ClusterNotFoundFault( parsed_error.message, )) } "InsufficientClusterCapacity" => { return RusotoError::Service( ResizeClusterError::InsufficientClusterCapacityFault( parsed_error.message, ), ) } "InvalidClusterState" => { return RusotoError::Service(ResizeClusterError::InvalidClusterStateFault( parsed_error.message, )) } "LimitExceededFault" => { return RusotoError::Service(ResizeClusterError::LimitExceededFault( parsed_error.message, )) } "NumberOfNodesPerClusterLimitExceeded" => { return RusotoError::Service( ResizeClusterError::NumberOfNodesPerClusterLimitExceededFault( parsed_error.message, ), ) } "NumberOfNodesQuotaExceeded" => { return RusotoError::Service( ResizeClusterError::NumberOfNodesQuotaExceededFault( parsed_error.message, ), ) } "UnauthorizedOperation" => { return RusotoError::Service(ResizeClusterError::UnauthorizedOperation( parsed_error.message, )) } "UnsupportedOperation" => { return RusotoError::Service(ResizeClusterError::UnsupportedOperationFault( parsed_error.message, )) } "UnsupportedOptionFault" => { return RusotoError::Service(ResizeClusterError::UnsupportedOptionFault( parsed_error.message, )) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for ResizeClusterError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ResizeClusterError::ClusterNotFoundFault(ref cause) => write!(f, "{}", 
cause), ResizeClusterError::InsufficientClusterCapacityFault(ref cause) => { write!(f, "{}", cause) } ResizeClusterError::InvalidClusterStateFault(ref cause) => write!(f, "{}", cause), ResizeClusterError::LimitExceededFault(ref cause) => write!(f, "{}", cause), ResizeClusterError::NumberOfNodesPerClusterLimitExceededFault(ref cause) => { write!(f, "{}", cause) } ResizeClusterError::NumberOfNodesQuotaExceededFault(ref cause) => { write!(f, "{}", cause) } ResizeClusterError::UnauthorizedOperation(ref cause) => write!(f, "{}", cause), ResizeClusterError::UnsupportedOperationFault(ref cause) => write!(f, "{}", cause), ResizeClusterError::UnsupportedOptionFault(ref cause) => write!(f, "{}", cause), } } } impl Error for ResizeClusterError {} /// Errors returned by RestoreFromClusterSnapshot #[derive(Debug, PartialEq)] pub enum RestoreFromClusterSnapshotError { ///

/// The owner of the specified snapshot has not authorized your account to access the snapshot.
    AccessToSnapshotDeniedFault(String),
    /// The account already has a cluster with the given identifier.
    ClusterAlreadyExistsFault(String),
    /// The parameter group name does not refer to an existing parameter group.
    ClusterParameterGroupNotFoundFault(String),
    /// The request would exceed the allowed number of cluster instances for this account.
    /// For information about increasing your quota, go to Limits in Amazon Redshift in
    /// the Amazon Redshift Cluster Management Guide.
    ClusterQuotaExceededFault(String),
    /// The cluster security group name does not refer to an existing cluster security group.
    ClusterSecurityGroupNotFoundFault(String),
    /// The snapshot identifier does not refer to an existing cluster snapshot.
    ClusterSnapshotNotFoundFault(String),
    /// The cluster subnet group name does not refer to an existing cluster subnet group.
    ClusterSubnetGroupNotFoundFault(String),
    /// The request cannot be completed because a dependent service is throttling requests
    /// made by Amazon Redshift on your behalf. Wait and retry the request.
    DependentServiceRequestThrottlingFault(String),
    /// There is no Amazon Redshift HSM client certificate with the specified identifier.
    HsmClientCertificateNotFoundFault(String),
    /// There is no Amazon Redshift HSM configuration with the specified identifier.
    HsmConfigurationNotFoundFault(String),
    /// The number of nodes specified exceeds the allotted capacity of the cluster.
    InsufficientClusterCapacityFault(String),
    /// The specified cluster snapshot is not in the `available` state, or other accounts
    /// are authorized to access the snapshot.
    InvalidClusterSnapshotStateFault(String),
    /// The cluster subnet group cannot be deleted because it is in use.
    InvalidClusterSubnetGroupStateFault(String),
    /// The provided cluster track name is not valid.
    InvalidClusterTrackFault(String),
    /// The Elastic IP (EIP) is invalid or cannot be found.
    InvalidElasticIpFault(String),
    /// The restore is invalid.
    InvalidRestoreFault(String),
    /// The requested subnet is not valid, or not all of the subnets are in the same VPC.
    InvalidSubnet(String),
    /// The tag is invalid.
    InvalidTagFault(String),
    /// The cluster subnet group does not cover all Availability Zones.
    InvalidVPCNetworkStateFault(String),
    /// The encryption key has exceeded its grant limit in AWS KMS.
    LimitExceededFault(String),
    /// The operation would exceed the number of nodes allowed for a cluster.
    NumberOfNodesPerClusterLimitExceededFault(String),
    /// The operation would exceed the number of nodes allotted to the account.
    /// For information about increasing your quota, go to Limits in Amazon Redshift in
    /// the Amazon Redshift Cluster Management Guide.
    NumberOfNodesQuotaExceededFault(String),
    /// We could not find the specified snapshot schedule.
    SnapshotScheduleNotFoundFault(String),
    /// You have exceeded the number of tags allowed.
    TagLimitExceededFault(String),
    /// Your account is not authorized to perform the requested operation.
    UnauthorizedOperation(String),
}

impl RestoreFromClusterSnapshotError {
    /// Parses an unsuccessful HTTP response body (AWS query-protocol XML) into a
    /// typed service error; unrecognized error codes fall through to `RusotoError::Unknown`.
    pub fn from_response(
        res: BufferedHttpResponse,
    ) -> RusotoError<RestoreFromClusterSnapshotError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "AccessToSnapshotDenied" => return RusotoError::Service(RestoreFromClusterSnapshotError::AccessToSnapshotDeniedFault(parsed_error.message)),
                    "ClusterAlreadyExists" => return RusotoError::Service(RestoreFromClusterSnapshotError::ClusterAlreadyExistsFault(parsed_error.message)),
                    "ClusterParameterGroupNotFound" => return RusotoError::Service(RestoreFromClusterSnapshotError::ClusterParameterGroupNotFoundFault(parsed_error.message)),
                    "ClusterQuotaExceeded" => return RusotoError::Service(RestoreFromClusterSnapshotError::ClusterQuotaExceededFault(parsed_error.message)),
                    "ClusterSecurityGroupNotFound" => return RusotoError::Service(RestoreFromClusterSnapshotError::ClusterSecurityGroupNotFoundFault(parsed_error.message)),
                    "ClusterSnapshotNotFound" => return RusotoError::Service(RestoreFromClusterSnapshotError::ClusterSnapshotNotFoundFault(parsed_error.message)),
                    "ClusterSubnetGroupNotFoundFault" => return RusotoError::Service(RestoreFromClusterSnapshotError::ClusterSubnetGroupNotFoundFault(parsed_error.message)),
                    "DependentServiceRequestThrottlingFault" => return RusotoError::Service(RestoreFromClusterSnapshotError::DependentServiceRequestThrottlingFault(parsed_error.message)),
                    "HsmClientCertificateNotFoundFault" => return RusotoError::Service(RestoreFromClusterSnapshotError::HsmClientCertificateNotFoundFault(parsed_error.message)),
                    "HsmConfigurationNotFoundFault" => return RusotoError::Service(RestoreFromClusterSnapshotError::HsmConfigurationNotFoundFault(parsed_error.message)),
                    "InsufficientClusterCapacity" => return RusotoError::Service(RestoreFromClusterSnapshotError::InsufficientClusterCapacityFault(parsed_error.message)),
                    "InvalidClusterSnapshotState" => return RusotoError::Service(RestoreFromClusterSnapshotError::InvalidClusterSnapshotStateFault(parsed_error.message)),
                    "InvalidClusterSubnetGroupStateFault" => return RusotoError::Service(RestoreFromClusterSnapshotError::InvalidClusterSubnetGroupStateFault(parsed_error.message)),
                    "InvalidClusterTrack" => return RusotoError::Service(RestoreFromClusterSnapshotError::InvalidClusterTrackFault(parsed_error.message)),
                    "InvalidElasticIpFault" => return RusotoError::Service(RestoreFromClusterSnapshotError::InvalidElasticIpFault(parsed_error.message)),
                    "InvalidRestore" => return RusotoError::Service(RestoreFromClusterSnapshotError::InvalidRestoreFault(parsed_error.message)),
                    "InvalidSubnet" => return RusotoError::Service(RestoreFromClusterSnapshotError::InvalidSubnet(parsed_error.message)),
                    "InvalidTagFault" => return RusotoError::Service(RestoreFromClusterSnapshotError::InvalidTagFault(parsed_error.message)),
                    "InvalidVPCNetworkStateFault" => return RusotoError::Service(RestoreFromClusterSnapshotError::InvalidVPCNetworkStateFault(parsed_error.message)),
                    "LimitExceededFault" => return RusotoError::Service(RestoreFromClusterSnapshotError::LimitExceededFault(parsed_error.message)),
                    "NumberOfNodesPerClusterLimitExceeded" => return RusotoError::Service(RestoreFromClusterSnapshotError::NumberOfNodesPerClusterLimitExceededFault(parsed_error.message)),
                    "NumberOfNodesQuotaExceeded" => return RusotoError::Service(RestoreFromClusterSnapshotError::NumberOfNodesQuotaExceededFault(parsed_error.message)),
                    "SnapshotScheduleNotFound" => return RusotoError::Service(RestoreFromClusterSnapshotError::SnapshotScheduleNotFoundFault(parsed_error.message)),
                    "TagLimitExceededFault" => return RusotoError::Service(RestoreFromClusterSnapshotError::TagLimitExceededFault(parsed_error.message)),
                    "UnauthorizedOperation" => return RusotoError::Service(RestoreFromClusterSnapshotError::UnauthorizedOperation(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    /// Reads the `<ErrorResponse><Error>` envelope common to all Redshift errors.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl fmt::Display for RestoreFromClusterSnapshotError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            RestoreFromClusterSnapshotError::AccessToSnapshotDeniedFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::ClusterAlreadyExistsFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::ClusterParameterGroupNotFoundFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::ClusterQuotaExceededFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::ClusterSecurityGroupNotFoundFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::ClusterSnapshotNotFoundFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::ClusterSubnetGroupNotFoundFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::DependentServiceRequestThrottlingFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::HsmClientCertificateNotFoundFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::HsmConfigurationNotFoundFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::InsufficientClusterCapacityFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::InvalidClusterSnapshotStateFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::InvalidClusterSubnetGroupStateFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::InvalidClusterTrackFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::InvalidElasticIpFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::InvalidRestoreFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::InvalidSubnet(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::InvalidTagFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::InvalidVPCNetworkStateFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::LimitExceededFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::NumberOfNodesPerClusterLimitExceededFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::NumberOfNodesQuotaExceededFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::SnapshotScheduleNotFoundFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::TagLimitExceededFault(ref cause) => write!(f, "{}", cause),
            RestoreFromClusterSnapshotError::UnauthorizedOperation(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for RestoreFromClusterSnapshotError {}
/// Errors returned by RestoreTableFromClusterSnapshot
#[derive(Debug, PartialEq)]
pub enum RestoreTableFromClusterSnapshotError {

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The snapshot identifier does not refer to an existing cluster snapshot.

ClusterSnapshotNotFoundFault(String), ///

You have exceeded the allowed number of table restore requests. Wait for your current table restore requests to complete before making a new request.

InProgressTableRestoreQuotaExceededFault(String), ///

The specified cluster snapshot is not in the available state, or other accounts are authorized to access the snapshot.

InvalidClusterSnapshotStateFault(String), ///

The specified cluster is not in the available state.

InvalidClusterStateFault(String), ///

The value specified for the sourceDatabaseName, sourceSchemaName, or sourceTableName parameter, or a combination of these, doesn't exist in the snapshot.

InvalidTableRestoreArgumentFault(String), ///

The requested operation isn't supported.

UnsupportedOperationFault(String), } impl RestoreTableFromClusterSnapshotError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => return RusotoError::Service(RestoreTableFromClusterSnapshotError::ClusterNotFoundFault(parsed_error.message)),"ClusterSnapshotNotFound" => return RusotoError::Service(RestoreTableFromClusterSnapshotError::ClusterSnapshotNotFoundFault(parsed_error.message)),"InProgressTableRestoreQuotaExceededFault" => return RusotoError::Service(RestoreTableFromClusterSnapshotError::InProgressTableRestoreQuotaExceededFault(parsed_error.message)),"InvalidClusterSnapshotState" => return RusotoError::Service(RestoreTableFromClusterSnapshotError::InvalidClusterSnapshotStateFault(parsed_error.message)),"InvalidClusterState" => return RusotoError::Service(RestoreTableFromClusterSnapshotError::InvalidClusterStateFault(parsed_error.message)),"InvalidTableRestoreArgument" => return RusotoError::Service(RestoreTableFromClusterSnapshotError::InvalidTableRestoreArgumentFault(parsed_error.message)),"UnsupportedOperation" => return RusotoError::Service(RestoreTableFromClusterSnapshotError::UnsupportedOperationFault(parsed_error.message)),_ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for RestoreTableFromClusterSnapshotError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { RestoreTableFromClusterSnapshotError::ClusterNotFoundFault(ref cause) => { write!(f, "{}", cause) } RestoreTableFromClusterSnapshotError::ClusterSnapshotNotFoundFault(ref cause) => { write!(f, "{}", 
cause) } RestoreTableFromClusterSnapshotError::InProgressTableRestoreQuotaExceededFault( ref cause, ) => write!(f, "{}", cause), RestoreTableFromClusterSnapshotError::InvalidClusterSnapshotStateFault(ref cause) => { write!(f, "{}", cause) } RestoreTableFromClusterSnapshotError::InvalidClusterStateFault(ref cause) => { write!(f, "{}", cause) } RestoreTableFromClusterSnapshotError::InvalidTableRestoreArgumentFault(ref cause) => { write!(f, "{}", cause) } RestoreTableFromClusterSnapshotError::UnsupportedOperationFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for RestoreTableFromClusterSnapshotError {} /// Errors returned by ResumeCluster #[derive(Debug, PartialEq)] pub enum ResumeClusterError { ///

/// The `ClusterIdentifier` parameter does not refer to an existing cluster.
    ClusterNotFoundFault(String),
    /// The specified cluster is not in the `available` state.
    InvalidClusterStateFault(String),
}

impl ResumeClusterError {
    /// Parses an unsuccessful HTTP response body (AWS query-protocol XML) into a
    /// typed service error; unrecognized error codes fall through to `RusotoError::Unknown`.
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ResumeClusterError> {
        {
            let reader = EventReader::new(res.body.as_ref());
            let mut stack = XmlResponse::new(reader.into_iter().peekable());
            find_start_element(&mut stack);
            if let Ok(parsed_error) = Self::deserialize(&mut stack) {
                match &parsed_error.code[..] {
                    "ClusterNotFound" => return RusotoError::Service(ResumeClusterError::ClusterNotFoundFault(parsed_error.message)),
                    "InvalidClusterState" => return RusotoError::Service(ResumeClusterError::InvalidClusterStateFault(parsed_error.message)),
                    _ => {}
                }
            }
        }
        RusotoError::Unknown(res)
    }

    /// Reads the `<ErrorResponse><Error>` envelope common to all Redshift errors.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        xml_util::start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl fmt::Display for ResumeClusterError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            ResumeClusterError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause),
            ResumeClusterError::InvalidClusterStateFault(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for ResumeClusterError {}
/// Errors returned by RevokeClusterSecurityGroupIngress
#[derive(Debug, PartialEq)]
pub enum RevokeClusterSecurityGroupIngressError {

The specified CIDR IP range or EC2 security group is not authorized for the specified cluster security group.

AuthorizationNotFoundFault(String), ///

The cluster security group name does not refer to an existing cluster security group.

ClusterSecurityGroupNotFoundFault(String), ///

The state of the cluster security group is not available.

InvalidClusterSecurityGroupStateFault(String), } impl RevokeClusterSecurityGroupIngressError { pub fn from_response( res: BufferedHttpResponse, ) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "AuthorizationNotFound" => return RusotoError::Service(RevokeClusterSecurityGroupIngressError::AuthorizationNotFoundFault(parsed_error.message)),"ClusterSecurityGroupNotFound" => return RusotoError::Service(RevokeClusterSecurityGroupIngressError::ClusterSecurityGroupNotFoundFault(parsed_error.message)),"InvalidClusterSecurityGroupState" => return RusotoError::Service(RevokeClusterSecurityGroupIngressError::InvalidClusterSecurityGroupStateFault(parsed_error.message)),_ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for RevokeClusterSecurityGroupIngressError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { RevokeClusterSecurityGroupIngressError::AuthorizationNotFoundFault(ref cause) => { write!(f, "{}", cause) } RevokeClusterSecurityGroupIngressError::ClusterSecurityGroupNotFoundFault( ref cause, ) => write!(f, "{}", cause), RevokeClusterSecurityGroupIngressError::InvalidClusterSecurityGroupStateFault( ref cause, ) => write!(f, "{}", cause), } } } impl Error for RevokeClusterSecurityGroupIngressError {} /// Errors returned by RevokeSnapshotAccess #[derive(Debug, PartialEq)] pub enum RevokeSnapshotAccessError { ///

The owner of the specified snapshot has not authorized your account to access the snapshot.

AccessToSnapshotDeniedFault(String), ///

The specified CIDR IP range or EC2 security group is not authorized for the specified cluster security group.

AuthorizationNotFoundFault(String), ///

The snapshot identifier does not refer to an existing cluster snapshot.

ClusterSnapshotNotFoundFault(String), } impl RevokeSnapshotAccessError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "AccessToSnapshotDenied" => { return RusotoError::Service( RevokeSnapshotAccessError::AccessToSnapshotDeniedFault( parsed_error.message, ), ) } "AuthorizationNotFound" => { return RusotoError::Service( RevokeSnapshotAccessError::AuthorizationNotFoundFault( parsed_error.message, ), ) } "ClusterSnapshotNotFound" => { return RusotoError::Service( RevokeSnapshotAccessError::ClusterSnapshotNotFoundFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for RevokeSnapshotAccessError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { RevokeSnapshotAccessError::AccessToSnapshotDeniedFault(ref cause) => { write!(f, "{}", cause) } RevokeSnapshotAccessError::AuthorizationNotFoundFault(ref cause) => { write!(f, "{}", cause) } RevokeSnapshotAccessError::ClusterSnapshotNotFoundFault(ref cause) => { write!(f, "{}", cause) } } } } impl Error for RevokeSnapshotAccessError {} /// Errors returned by RotateEncryptionKey #[derive(Debug, PartialEq)] pub enum RotateEncryptionKeyError { ///

The ClusterIdentifier parameter does not refer to an existing cluster.

ClusterNotFoundFault(String), ///

The request cannot be completed because a dependent service is throttling requests made by Amazon Redshift on your behalf. Wait and retry the request.

DependentServiceRequestThrottlingFault(String), ///

The specified cluster is not in the available state.

InvalidClusterStateFault(String), } impl RotateEncryptionKeyError { pub fn from_response(res: BufferedHttpResponse) -> RusotoError { { let reader = EventReader::new(res.body.as_ref()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); if let Ok(parsed_error) = Self::deserialize(&mut stack) { match &parsed_error.code[..] { "ClusterNotFound" => { return RusotoError::Service( RotateEncryptionKeyError::ClusterNotFoundFault(parsed_error.message), ) } "DependentServiceRequestThrottlingFault" => { return RusotoError::Service( RotateEncryptionKeyError::DependentServiceRequestThrottlingFault( parsed_error.message, ), ) } "InvalidClusterState" => { return RusotoError::Service( RotateEncryptionKeyError::InvalidClusterStateFault( parsed_error.message, ), ) } _ => {} } } } RusotoError::Unknown(res) } fn deserialize(stack: &mut T) -> Result where T: Peek + Next, { xml_util::start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl fmt::Display for RotateEncryptionKeyError { #[allow(unused_variables)] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { RotateEncryptionKeyError::ClusterNotFoundFault(ref cause) => write!(f, "{}", cause), RotateEncryptionKeyError::DependentServiceRequestThrottlingFault(ref cause) => { write!(f, "{}", cause) } RotateEncryptionKeyError::InvalidClusterStateFault(ref cause) => write!(f, "{}", cause), } } } impl Error for RotateEncryptionKeyError {} /// Trait representing the capabilities of the Amazon Redshift API. Amazon Redshift clients implement this trait. #[async_trait] pub trait Redshift { ///

Exchanges a DC1 Reserved Node for a DC2 Reserved Node with no changes to the configuration (term, payment type, or number of nodes) and no additional costs.

async fn accept_reserved_node_exchange( &self, input: AcceptReservedNodeExchangeInputMessage, ) -> Result>; ///

Adds an inbound (ingress) rule to an Amazon Redshift security group. Depending on whether the application accessing your cluster is running on the Internet or an Amazon EC2 instance, you can authorize inbound access to either a Classless Interdomain Routing (CIDR)/Internet Protocol (IP) range or to an Amazon EC2 security group. You can add as many as 20 ingress rules to an Amazon Redshift security group.

If you authorize access to an Amazon EC2 security group, specify EC2SecurityGroupName and EC2SecurityGroupOwnerId. The Amazon EC2 security group and Amazon Redshift cluster must be in the same AWS Region.

If you authorize access to a CIDR/IP address range, specify CIDRIP. For an overview of CIDR blocks, see the Wikipedia article on Classless Inter-Domain Routing.

You must also associate the security group with a cluster so that clients running on these IP addresses or the EC2 instance are authorized to connect to the cluster. For information about managing security groups, go to Working with Security Groups in the Amazon Redshift Cluster Management Guide.

async fn authorize_cluster_security_group_ingress( &self, input: AuthorizeClusterSecurityGroupIngressMessage, ) -> Result< AuthorizeClusterSecurityGroupIngressResult, RusotoError, >; ///

Authorizes the specified AWS customer account to restore the specified snapshot.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

async fn authorize_snapshot_access( &self, input: AuthorizeSnapshotAccessMessage, ) -> Result>; ///

Deletes a set of cluster snapshots.

async fn batch_delete_cluster_snapshots( &self, input: BatchDeleteClusterSnapshotsRequest, ) -> Result>; ///

Modifies the settings for a set of cluster snapshots.

async fn batch_modify_cluster_snapshots( &self, input: BatchModifyClusterSnapshotsMessage, ) -> Result< BatchModifyClusterSnapshotsOutputMessage, RusotoError, >; ///

Cancels a resize operation for a cluster.

async fn cancel_resize( &self, input: CancelResizeMessage, ) -> Result>; ///

Copies the specified automated cluster snapshot to a new manual cluster snapshot. The source must be an automated snapshot and it must be in the available state.

When you delete a cluster, Amazon Redshift deletes any automated snapshots of the cluster. Also, when the retention period of the snapshot expires, Amazon Redshift automatically deletes it. If you want to keep an automated snapshot for a longer period, you can make a manual copy of the snapshot. Manual snapshots are retained until you delete them.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

async fn copy_cluster_snapshot( &self, input: CopyClusterSnapshotMessage, ) -> Result>; ///

Creates a new cluster with the specified parameters.

To create a cluster in Virtual Private Cloud (VPC), you must provide a cluster subnet group name. The cluster subnet group identifies the subnets of your VPC that Amazon Redshift uses when creating the cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

async fn create_cluster( &self, input: CreateClusterMessage, ) -> Result>; ///

Creates an Amazon Redshift parameter group.

Creating parameter groups is independent of creating clusters. You can associate a cluster with a parameter group when you create the cluster. You can also associate an existing cluster with a parameter group after the cluster is created by using ModifyCluster.

Parameters in the parameter group define specific behavior that applies to the databases you create on the cluster. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

async fn create_cluster_parameter_group( &self, input: CreateClusterParameterGroupMessage, ) -> Result>; ///

Creates a new Amazon Redshift security group. You use security groups to control access to non-VPC clusters.

For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

async fn create_cluster_security_group( &self, input: CreateClusterSecurityGroupMessage, ) -> Result>; ///

Creates a manual snapshot of the specified cluster. The cluster must be in the available state.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

async fn create_cluster_snapshot( &self, input: CreateClusterSnapshotMessage, ) -> Result>; ///

Creates a new Amazon Redshift subnet group. You must provide a list of one or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC) when creating Amazon Redshift subnet group.

For information about subnet groups, go to Amazon Redshift Cluster Subnet Groups in the Amazon Redshift Cluster Management Guide.

async fn create_cluster_subnet_group( &self, input: CreateClusterSubnetGroupMessage, ) -> Result>; ///

Creates an Amazon Redshift event notification subscription. This action requires an ARN (Amazon Resource Name) of an Amazon SNS topic created by either the Amazon Redshift console, the Amazon SNS console, or the Amazon SNS API. To obtain an ARN with Amazon SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

You can specify the source type, and lists of Amazon Redshift source IDs, event categories, and event severities. Notifications will be sent for all events you want that match those criteria. For example, you can specify source type = cluster, source ID = my-cluster-1 and mycluster2, event categories = Availability, Backup, and severity = ERROR. The subscription will only send notifications for those ERROR events in the Availability and Backup categories for the specified clusters.

If you specify both the source type and source IDs, such as source type = cluster and source identifier = my-cluster-1, notifications will be sent for all the cluster events for my-cluster-1. If you specify a source type but do not specify a source identifier, you will receive notice of the events for the objects of that type in your AWS account. If you do not specify either the SourceType nor the SourceIdentifier, you will be notified of events generated from all Amazon Redshift sources belonging to your AWS account. You must specify a source type if you specify a source ID.

async fn create_event_subscription( &self, input: CreateEventSubscriptionMessage, ) -> Result>; ///

Creates an HSM client certificate that an Amazon Redshift cluster will use to connect to the client's HSM in order to store and retrieve the keys used to encrypt the cluster databases.

The command returns a public key, which you must store in the HSM. In addition to creating the HSM certificate, you must create an Amazon Redshift HSM configuration that provides a cluster the information needed to store and use encryption keys in the HSM. For more information, go to Hardware Security Modules in the Amazon Redshift Cluster Management Guide.

async fn create_hsm_client_certificate( &self, input: CreateHsmClientCertificateMessage, ) -> Result>; ///

Creates an HSM configuration that contains the information required by an Amazon Redshift cluster to store and use database encryption keys in a Hardware Security Module (HSM). After creating the HSM configuration, you can specify it as a parameter when creating a cluster. The cluster will then store its encryption keys in the HSM.

In addition to creating an HSM configuration, you must also create an HSM client certificate. For more information, go to Hardware Security Modules in the Amazon Redshift Cluster Management Guide.

async fn create_hsm_configuration( &self, input: CreateHsmConfigurationMessage, ) -> Result>; ///

Creates a scheduled action. A scheduled action contains a schedule and an Amazon Redshift API action. For example, you can create a schedule of when to run the ResizeCluster API operation.

async fn create_scheduled_action( &self, input: CreateScheduledActionMessage, ) -> Result>; ///

Creates a snapshot copy grant that permits Amazon Redshift to use a customer master key (CMK) from AWS Key Management Service (AWS KMS) to encrypt copied snapshots in a destination region.

For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

async fn create_snapshot_copy_grant( &self, input: CreateSnapshotCopyGrantMessage, ) -> Result>; ///

Create a snapshot schedule that can be associated to a cluster and which overrides the default system backup schedule.

async fn create_snapshot_schedule( &self, input: CreateSnapshotScheduleMessage, ) -> Result>; ///

Adds tags to a cluster.

A resource can have up to 50 tags. If you try to create more than 50 tags for a resource, you will receive an error and the attempt will fail.

If you specify a key that already exists for the resource, the value for that key will be updated with the new value.

async fn create_tags( &self, input: CreateTagsMessage, ) -> Result<(), RusotoError>; ///

Creates a usage limit for a specified Amazon Redshift feature on a cluster. The usage limit is identified by the returned usage limit identifier.

async fn create_usage_limit( &self, input: CreateUsageLimitMessage, ) -> Result>; ///

Deletes a previously provisioned cluster without its final snapshot being created. A successful response from the web service indicates that the request was received correctly. Use DescribeClusters to monitor the status of the deletion. The delete operation cannot be canceled or reverted once submitted. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

If you want to shut down the cluster and retain it for future use, set SkipFinalClusterSnapshot to false and specify a name for FinalClusterSnapshotIdentifier. You can later restore this snapshot to resume using the cluster. If a final cluster snapshot is requested, the status of the cluster will be "final-snapshot" while the snapshot is being taken, then it's "deleting" once Amazon Redshift begins deleting the cluster.

For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

async fn delete_cluster( &self, input: DeleteClusterMessage, ) -> Result>; ///

Deletes a specified Amazon Redshift parameter group.

You cannot delete a parameter group if it is associated with a cluster.

async fn delete_cluster_parameter_group( &self, input: DeleteClusterParameterGroupMessage, ) -> Result<(), RusotoError>; ///

Deletes an Amazon Redshift security group.

You cannot delete a security group that is associated with any clusters. You cannot delete the default security group.

For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

async fn delete_cluster_security_group( &self, input: DeleteClusterSecurityGroupMessage, ) -> Result<(), RusotoError>; ///

Deletes the specified manual snapshot. The snapshot must be in the available state, with no other users authorized to access the snapshot.

Unlike automated snapshots, manual snapshots are retained even after you delete your cluster. Amazon Redshift does not delete your manual snapshots. You must delete manual snapshot explicitly to avoid getting charged. If other accounts are authorized to access the snapshot, you must revoke all of the authorizations before you can delete the snapshot.

async fn delete_cluster_snapshot( &self, input: DeleteClusterSnapshotMessage, ) -> Result>; ///

Deletes the specified cluster subnet group.

async fn delete_cluster_subnet_group( &self, input: DeleteClusterSubnetGroupMessage, ) -> Result<(), RusotoError>; ///

Deletes an Amazon Redshift event notification subscription.

async fn delete_event_subscription( &self, input: DeleteEventSubscriptionMessage, ) -> Result<(), RusotoError>; ///

Deletes the specified HSM client certificate.

async fn delete_hsm_client_certificate( &self, input: DeleteHsmClientCertificateMessage, ) -> Result<(), RusotoError>; ///

Deletes the specified Amazon Redshift HSM configuration.

async fn delete_hsm_configuration( &self, input: DeleteHsmConfigurationMessage, ) -> Result<(), RusotoError>; ///

Deletes a scheduled action.

async fn delete_scheduled_action( &self, input: DeleteScheduledActionMessage, ) -> Result<(), RusotoError>; ///

Deletes the specified snapshot copy grant.

async fn delete_snapshot_copy_grant( &self, input: DeleteSnapshotCopyGrantMessage, ) -> Result<(), RusotoError>; ///

Deletes a snapshot schedule.

async fn delete_snapshot_schedule( &self, input: DeleteSnapshotScheduleMessage, ) -> Result<(), RusotoError>; ///

Deletes tags from a resource. You must provide the ARN of the resource from which you want to delete the tag or tags.

async fn delete_tags( &self, input: DeleteTagsMessage, ) -> Result<(), RusotoError>; ///

Deletes a usage limit from a cluster.

async fn delete_usage_limit( &self, input: DeleteUsageLimitMessage, ) -> Result<(), RusotoError>; ///

Returns a list of attributes attached to an account

async fn describe_account_attributes( &self, input: DescribeAccountAttributesMessage, ) -> Result>; ///

Returns an array of ClusterDbRevision objects.

async fn describe_cluster_db_revisions( &self, input: DescribeClusterDbRevisionsMessage, ) -> Result>; ///

Returns a list of Amazon Redshift parameter groups, including parameter groups you created and the default parameter group. For each parameter group, the response includes the parameter group name, description, and parameter group family name. You can optionally specify a name to retrieve the description of a specific parameter group.

For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all parameter groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all parameter groups that have any combination of those values are returned.

If both tag keys and values are omitted from the request, parameter groups are returned regardless of whether they have tag keys or values associated with them.

async fn describe_cluster_parameter_groups( &self, input: DescribeClusterParameterGroupsMessage, ) -> Result>; ///

Returns a detailed list of parameters contained within the specified Amazon Redshift parameter group. For each parameter the response includes information such as parameter name, description, data type, value, whether the parameter value is modifiable, and so on.

You can specify source filter to retrieve parameters of only specific type. For example, to retrieve parameters that were modified by a user action such as from ModifyClusterParameterGroup, you can specify source equal to user.

For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

async fn describe_cluster_parameters( &self, input: DescribeClusterParametersMessage, ) -> Result>; ///

Returns information about Amazon Redshift security groups. If the name of a security group is specified, the response will contain only information about only that security group.

For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all security groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all security groups that have any combination of those values are returned.

If both tag keys and values are omitted from the request, security groups are returned regardless of whether they have tag keys or values associated with them.

async fn describe_cluster_security_groups( &self, input: DescribeClusterSecurityGroupsMessage, ) -> Result>; ///

Returns one or more snapshot objects, which contain metadata about your cluster snapshots. By default, this operation returns information about all snapshots of all clusters that are owned by your AWS customer account. No information is returned for snapshots owned by inactive AWS customer accounts.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all snapshots that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all snapshots that have any combination of those values are returned. Only snapshots that you own are returned in the response; shared snapshots are not returned with the tag key and tag value request parameters.

If both tag keys and values are omitted from the request, snapshots are returned regardless of whether they have tag keys or values associated with them.

async fn describe_cluster_snapshots( &self, input: DescribeClusterSnapshotsMessage, ) -> Result>; ///

Returns one or more cluster subnet group objects, which contain metadata about your cluster subnet groups. By default, this operation returns information about all cluster subnet groups that are defined in your AWS account.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all subnet groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all subnet groups that have any combination of those values are returned.

If both tag keys and values are omitted from the request, subnet groups are returned regardless of whether they have tag keys or values associated with them.

async fn describe_cluster_subnet_groups( &self, input: DescribeClusterSubnetGroupsMessage, ) -> Result>; ///

Returns a list of all the available maintenance tracks.

async fn describe_cluster_tracks( &self, input: DescribeClusterTracksMessage, ) -> Result>; ///

Returns descriptions of the available Amazon Redshift cluster versions. You can call this operation even before creating any clusters to learn more about the Amazon Redshift versions. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

async fn describe_cluster_versions( &self, input: DescribeClusterVersionsMessage, ) -> Result>; ///

Returns properties of provisioned clusters including general cluster properties, cluster database properties, maintenance and backup properties, and security and access properties. This operation supports pagination. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all clusters that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all clusters that have any combination of those values are returned.

If both tag keys and values are omitted from the request, clusters are returned regardless of whether they have tag keys or values associated with them.

async fn describe_clusters( &self, input: DescribeClustersMessage, ) -> Result>; ///

Returns a list of parameter settings for the specified parameter group family.

For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

async fn describe_default_cluster_parameters( &self, input: DescribeDefaultClusterParametersMessage, ) -> Result< DescribeDefaultClusterParametersResult, RusotoError, >; ///

Displays a list of event categories for all event source types, or for a specified source type. For a list of the event categories and source types, go to Amazon Redshift Event Notifications.

async fn describe_event_categories( &self, input: DescribeEventCategoriesMessage, ) -> Result>; ///

Lists descriptions of all the Amazon Redshift event notification subscriptions for a customer account. If you specify a subscription name, lists the description for that subscription.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all event notification subscriptions that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all subscriptions that have any combination of those values are returned.

If both tag keys and values are omitted from the request, subscriptions are returned regardless of whether they have tag keys or values associated with them.

async fn describe_event_subscriptions( &self, input: DescribeEventSubscriptionsMessage, ) -> Result>; ///

Returns events related to clusters, security groups, snapshots, and parameter groups for the past 14 days. Events specific to a particular cluster, security group, snapshot or parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.

async fn describe_events( &self, input: DescribeEventsMessage, ) -> Result>; ///

Returns information about the specified HSM client certificate. If no certificate ID is specified, returns information about all the HSM certificates owned by your AWS customer account.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM client certificates that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM client certificates that have any combination of those values are returned.

If both tag keys and values are omitted from the request, HSM client certificates are returned regardless of whether they have tag keys or values associated with them.

async fn describe_hsm_client_certificates( &self, input: DescribeHsmClientCertificatesMessage, ) -> Result>; ///

Returns information about the specified Amazon Redshift HSM configuration. If no configuration ID is specified, returns information about all the HSM configurations owned by your AWS customer account.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM connections that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM connections that have any combination of those values are returned.

If both tag keys and values are omitted from the request, HSM connections are returned regardless of whether they have tag keys or values associated with them.

async fn describe_hsm_configurations( &self, input: DescribeHsmConfigurationsMessage, ) -> Result>; ///

Describes whether information, such as queries and connection attempts, is being logged for the specified Amazon Redshift cluster.

async fn describe_logging_status( &self, input: DescribeLoggingStatusMessage, ) -> Result>; ///

Returns properties of possible node configurations such as node type, number of nodes, and disk usage for the specified action type.

async fn describe_node_configuration_options( &self, input: DescribeNodeConfigurationOptionsMessage, ) -> Result>; ///

Returns a list of orderable cluster options. Before you create a new cluster you can use this operation to find what options are available, such as the EC2 Availability Zones (AZ) in the specific AWS Region that you can specify, and the node types you can request. The node types differ by available storage, memory, CPU and price. With the cost involved you might want to obtain a list of cluster options in the specific region and specify values when creating a cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

async fn describe_orderable_cluster_options( &self, input: DescribeOrderableClusterOptionsMessage, ) -> Result>; ///

Returns a list of the available reserved node offerings by Amazon Redshift with their descriptions including the node type, the fixed and recurring costs of reserving the node and duration the node will be reserved for you. These descriptions help you determine which reserve node offering you want to purchase. You then use the unique offering ID in you call to PurchaseReservedNodeOffering to reserve one or more nodes for your Amazon Redshift cluster.

For more information about reserved node offerings, go to Purchasing Reserved Nodes in the Amazon Redshift Cluster Management Guide.

async fn describe_reserved_node_offerings( &self, input: DescribeReservedNodeOfferingsMessage, ) -> Result>; ///

Returns the descriptions of the reserved nodes.

async fn describe_reserved_nodes( &self, input: DescribeReservedNodesMessage, ) -> Result>; ///

Returns information about the last resize operation for the specified cluster. If no resize operation has ever been initiated for the specified cluster, a HTTP 404 error is returned. If a resize operation was initiated and completed, the status of the resize remains as SUCCEEDED until the next resize.

A resize operation can be requested using ModifyCluster and specifying a different number or type of nodes for the cluster.

async fn describe_resize( &self, input: DescribeResizeMessage, ) -> Result>; ///

Describes properties of scheduled actions.

async fn describe_scheduled_actions( &self, input: DescribeScheduledActionsMessage, ) -> Result>; ///

Returns a list of snapshot copy grants owned by the AWS account in the destination region.

For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

async fn describe_snapshot_copy_grants( &self, input: DescribeSnapshotCopyGrantsMessage, ) -> Result>; ///

Returns a list of snapshot schedules.

async fn describe_snapshot_schedules( &self, input: DescribeSnapshotSchedulesMessage, ) -> Result>; ///

Returns account level backups storage size and provisional storage.

async fn describe_storage( &self, ) -> Result>; ///

Lists the status of one or more table restore requests made using the RestoreTableFromClusterSnapshot API action. If you don't specify a value for the TableRestoreRequestId parameter, then DescribeTableRestoreStatus returns the status of all table restore requests ordered by the date and time of the request in ascending order. Otherwise DescribeTableRestoreStatus returns the status of the table specified by TableRestoreRequestId.

async fn describe_table_restore_status( &self, input: DescribeTableRestoreStatusMessage, ) -> Result>; ///

Returns a list of tags. You can return tags from a specific resource by specifying an ARN, or you can return all tags for a given type of resource, such as clusters, snapshots, and so on.

The following are limitations for DescribeTags:

  • You cannot specify an ARN and a resource-type value together in the same request.

  • You cannot use the MaxRecords and Marker parameters together with the ARN parameter.

  • The MaxRecords parameter can be a range from 10 to 50 results to return in a request.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all resources that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all resources that have any combination of those values are returned.

If both tag keys and values are omitted from the request, resources are returned regardless of whether they have tag keys or values associated with them.

async fn describe_tags( &self, input: DescribeTagsMessage, ) -> Result>; ///

Shows usage limits on a cluster. Results are filtered based on the combination of input usage limit identifier, cluster identifier, and feature type parameters:

  • If usage limit identifier, cluster identifier, and feature type are not provided, then all usage limit objects for the current account in the current region are returned.

  • If usage limit identifier is provided, then the corresponding usage limit object is returned.

  • If cluster identifier is provided, then all usage limit objects for the specified cluster are returned.

  • If cluster identifier and feature type are provided, then all usage limit objects for the combination of cluster and feature are returned.

async fn describe_usage_limits( &self, input: DescribeUsageLimitsMessage, ) -> Result>; ///

Stops logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.

async fn disable_logging( &self, input: DisableLoggingMessage, ) -> Result>; ///

Disables the automatic copying of snapshots from one region to another region for a specified cluster.

If your cluster and its snapshots are encrypted using a customer master key (CMK) from AWS KMS, use DeleteSnapshotCopyGrant to delete the grant that grants Amazon Redshift permission to the CMK in the destination region.

async fn disable_snapshot_copy( &self, input: DisableSnapshotCopyMessage, ) -> Result>; ///

Starts logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.

async fn enable_logging( &self, input: EnableLoggingMessage, ) -> Result>; ///

Enables the automatic copy of snapshots from one region to another region for a specified cluster.

async fn enable_snapshot_copy( &self, input: EnableSnapshotCopyMessage, ) -> Result>; ///

Returns a database user name and temporary password with temporary authorization to log on to an Amazon Redshift database. The action returns the database user name prefixed with IAM: if AutoCreate is False or IAMA: if AutoCreate is True. You can optionally specify one or more database user groups that the user will join at log on. By default, the temporary credentials expire in 900 seconds. You can optionally specify a duration between 900 seconds (15 minutes) and 3600 seconds (60 minutes). For more information, see Using IAM Authentication to Generate Database User Credentials in the Amazon Redshift Cluster Management Guide.

The AWS Identity and Access Management (IAM) user or role that executes GetClusterCredentials must have an IAM policy attached that allows access to all necessary actions and resources. For more information about permissions, see Resource Policies for GetClusterCredentials in the Amazon Redshift Cluster Management Guide.

If the DbGroups parameter is specified, the IAM policy must allow the redshift:JoinGroup action with access to the listed dbgroups.

In addition, if the AutoCreate parameter is set to True, then the policy must include the redshift:CreateClusterUser privilege.

If the DbName parameter is specified, the IAM policy must allow access to the resource dbname for the specified database name.

async fn get_cluster_credentials( &self, input: GetClusterCredentialsMessage, ) -> Result>; ///

Returns an array of DC2 ReservedNodeOfferings that matches the payment type, term, and usage price of the given DC1 reserved node.

/// Returns DC2 reserved-node offerings matching the payment type, term, and price of a DC1 node.
async fn get_reserved_node_exchange_offerings(
    &self,
    input: GetReservedNodeExchangeOfferingsInputMessage,
) -> Result<
    GetReservedNodeExchangeOfferingsOutputMessage,
    RusotoError<GetReservedNodeExchangeOfferingsError>,
>;
///

Modifies the settings for a cluster.

You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.

You can add another security or parameter group, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

async fn modify_cluster( &self, input: ModifyClusterMessage, ) -> Result>; ///

Modifies the database revision of a cluster. The database revision is a unique revision of the database running in a cluster.

async fn modify_cluster_db_revision( &self, input: ModifyClusterDbRevisionMessage, ) -> Result>; ///

Modifies the list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.

A cluster can have up to 10 IAM roles associated at any time.

async fn modify_cluster_iam_roles( &self, input: ModifyClusterIamRolesMessage, ) -> Result>; ///

Modifies the maintenance settings of a cluster.

async fn modify_cluster_maintenance( &self, input: ModifyClusterMaintenanceMessage, ) -> Result>; ///

Modifies the parameters of a parameter group.

For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

async fn modify_cluster_parameter_group( &self, input: ModifyClusterParameterGroupMessage, ) -> Result>; ///

Modifies the settings for a snapshot.

This example modifies the manual retention period setting for a cluster snapshot.

async fn modify_cluster_snapshot( &self, input: ModifyClusterSnapshotMessage, ) -> Result>; ///

Modifies a snapshot schedule for a cluster.

/// Modifies a snapshot schedule for a cluster. Returns no payload on success.
async fn modify_cluster_snapshot_schedule(
    &self,
    input: ModifyClusterSnapshotScheduleMessage,
) -> Result<(), RusotoError<ModifyClusterSnapshotScheduleError>>;
///

Modifies a cluster subnet group to include the specified list of VPC subnets. The operation replaces the existing list of subnets with the new list of subnets.

async fn modify_cluster_subnet_group( &self, input: ModifyClusterSubnetGroupMessage, ) -> Result>; ///

Modifies an existing Amazon Redshift event notification subscription.

async fn modify_event_subscription( &self, input: ModifyEventSubscriptionMessage, ) -> Result>; ///

Modifies a scheduled action.

async fn modify_scheduled_action( &self, input: ModifyScheduledActionMessage, ) -> Result>; ///

Modifies the number of days to retain snapshots in the destination AWS Region after they are copied from the source AWS Region. By default, this operation only changes the retention period of copied automated snapshots. The retention periods for both new and existing copied automated snapshots are updated with the new retention period. You can set the manual option to change only the retention periods of copied manual snapshots. If you set this option, only newly copied manual snapshots have the new retention period.

/// Modifies the retention period for snapshots copied to the destination AWS Region.
async fn modify_snapshot_copy_retention_period(
    &self,
    input: ModifySnapshotCopyRetentionPeriodMessage,
) -> Result<
    ModifySnapshotCopyRetentionPeriodResult,
    RusotoError<ModifySnapshotCopyRetentionPeriodError>,
>;
///

Modifies a snapshot schedule. Any schedule associated with a cluster is modified asynchronously.

async fn modify_snapshot_schedule( &self, input: ModifySnapshotScheduleMessage, ) -> Result>; ///

Modifies a usage limit in a cluster. You can't modify the feature type or period of a usage limit.

async fn modify_usage_limit( &self, input: ModifyUsageLimitMessage, ) -> Result>; ///

Pauses a cluster.

async fn pause_cluster( &self, input: PauseClusterMessage, ) -> Result>; ///

Allows you to purchase reserved nodes. Amazon Redshift offers a predefined set of reserved node offerings. You can purchase one or more of the offerings. You can call the DescribeReservedNodeOfferings API to obtain the available reserved node offerings. You can call this API by providing a specific reserved node offering and the number of nodes you want to reserve.

For more information about reserved node offerings, go to Purchasing Reserved Nodes in the Amazon Redshift Cluster Management Guide.

async fn purchase_reserved_node_offering( &self, input: PurchaseReservedNodeOfferingMessage, ) -> Result>; ///

Reboots a cluster. This action is taken as soon as possible. It results in a momentary outage to the cluster, during which the cluster status is set to rebooting. A cluster event is created when the reboot is completed. Any pending cluster modifications (see ModifyCluster) are applied at this reboot. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

async fn reboot_cluster( &self, input: RebootClusterMessage, ) -> Result>; ///

Sets one or more parameters of the specified parameter group to their default values and sets the source values of the parameters to "engine-default". To reset the entire parameter group specify the ResetAllParameters parameter. For parameter changes to take effect you must reboot any associated clusters.

async fn reset_cluster_parameter_group( &self, input: ResetClusterParameterGroupMessage, ) -> Result>; ///

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions:

  • You can only resize clusters of the following types:

    • dc2.large

    • dc2.8xlarge

    • ds2.xlarge

    • ds2.8xlarge

    • ra3.4xlarge

    • ra3.16xlarge

  • The type of nodes that you add must match the node type for the cluster.

async fn resize_cluster( &self, input: ResizeClusterMessage, ) -> Result>; ///

Creates a new cluster from a snapshot. By default, Amazon Redshift creates the resulting cluster with the same configuration as the original cluster from which the snapshot was created, except that the new cluster is created with the default cluster security and parameter groups. After Amazon Redshift creates the cluster, you can use the ModifyCluster API to associate a different security group and different parameter group with the restored cluster. If you are using a DS node type, you can also choose to change to another DS node type of the same size during restore.

If you restore a cluster into a VPC, you must provide a cluster subnet group where you want the cluster restored.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

async fn restore_from_cluster_snapshot( &self, input: RestoreFromClusterSnapshotMessage, ) -> Result>; ///

Creates a new table from a table in an Amazon Redshift cluster snapshot. You must create the new table within the Amazon Redshift cluster that the snapshot was taken from.

You cannot use RestoreTableFromClusterSnapshot to restore a table with the same name as an existing table in an Amazon Redshift cluster. That is, you cannot overwrite an existing table in a cluster with a restored table. If you want to replace your original table with a new, restored table, then rename or drop your original table before you call RestoreTableFromClusterSnapshot. When you have renamed your original table, then you can pass the original name of the table as the NewTableName parameter value in the call to RestoreTableFromClusterSnapshot. This way, you can replace the original table with the table created from the snapshot.

/// Creates a new table from a table in an Amazon Redshift cluster snapshot.
async fn restore_table_from_cluster_snapshot(
    &self,
    input: RestoreTableFromClusterSnapshotMessage,
) -> Result<
    RestoreTableFromClusterSnapshotResult,
    RusotoError<RestoreTableFromClusterSnapshotError>,
>;
///

Resumes a paused cluster.

async fn resume_cluster( &self, input: ResumeClusterMessage, ) -> Result>; ///

Revokes an ingress rule in an Amazon Redshift security group for a previously authorized IP range or Amazon EC2 security group. To add an ingress rule, see AuthorizeClusterSecurityGroupIngress. For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

/// Revokes an ingress rule in a cluster security group for an IP range or EC2 security group.
async fn revoke_cluster_security_group_ingress(
    &self,
    input: RevokeClusterSecurityGroupIngressMessage,
) -> Result<
    RevokeClusterSecurityGroupIngressResult,
    RusotoError<RevokeClusterSecurityGroupIngressError>,
>;
///

Removes the ability of the specified AWS customer account to restore the specified snapshot. If the account is currently restoring the snapshot, the restore will run to completion.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

async fn revoke_snapshot_access( &self, input: RevokeSnapshotAccessMessage, ) -> Result>; ///

Rotates the encryption keys for a cluster.

/// Rotates the encryption keys for a cluster.
async fn rotate_encryption_key(
    &self,
    input: RotateEncryptionKeyMessage,
) -> Result<RotateEncryptionKeyResult, RusotoError<RotateEncryptionKeyError>>;
}

/// A client for the Amazon Redshift API.
#[derive(Clone)]
pub struct RedshiftClient {
    client: Client,
    region: region::Region,
}

impl RedshiftClient {
    /// Creates a client backed by the default tokio event loop.
    ///
    /// The client will use the default credentials provider and tls client.
    pub fn new(region: region::Region) -> RedshiftClient {
        RedshiftClient {
            client: Client::shared(),
            region,
        }
    }

    /// Creates a client with a caller-supplied request dispatcher and
    /// credentials provider (useful for testing and custom transports).
    pub fn new_with<P, D>(
        request_dispatcher: D,
        credentials_provider: P,
        region: region::Region,
    ) -> RedshiftClient
    where
        P: ProvideAwsCredentials + Send + Sync + 'static,
        D: DispatchSignedRequest + Send + Sync + 'static,
    {
        RedshiftClient {
            client: Client::new_with(credentials_provider, request_dispatcher),
            region,
        }
    }

    /// Wraps an already-constructed rusoto `Client`.
    pub fn new_with_client(client: Client, region: region::Region) -> RedshiftClient {
        RedshiftClient { client, region }
    }
}

#[async_trait]
impl Redshift for RedshiftClient {
    ///

Exchanges a DC1 Reserved Node for a DC2 Reserved Node with no changes to the configuration (term, payment type, or number of nodes) and no additional costs.

async fn accept_reserved_node_exchange( &self, input: AcceptReservedNodeExchangeInputMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("AcceptReservedNodeExchange"); let mut params = params; AcceptReservedNodeExchangeInputMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, AcceptReservedNodeExchangeError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = AcceptReservedNodeExchangeOutputMessageDeserializer::deserialize( "AcceptReservedNodeExchangeResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Adds an inbound (ingress) rule to an Amazon Redshift security group. Depending on whether the application accessing your cluster is running on the Internet or an Amazon EC2 instance, you can authorize inbound access to either a Classless Interdomain Routing (CIDR)/Internet Protocol (IP) range or to an Amazon EC2 security group. You can add as many as 20 ingress rules to an Amazon Redshift security group.

If you authorize access to an Amazon EC2 security group, specify EC2SecurityGroupName and EC2SecurityGroupOwnerId. The Amazon EC2 security group and Amazon Redshift cluster must be in the same AWS Region.

If you authorize access to a CIDR/IP address range, specify CIDRIP. For an overview of CIDR blocks, see the Wikipedia article on Classless Inter-Domain Routing.

You must also associate the security group with a cluster so that clients running on these IP addresses or the EC2 instance are authorized to connect to the cluster. For information about managing security groups, go to Working with Security Groups in the Amazon Redshift Cluster Management Guide.

async fn authorize_cluster_security_group_ingress( &self, input: AuthorizeClusterSecurityGroupIngressMessage, ) -> Result< AuthorizeClusterSecurityGroupIngressResult, RusotoError, > { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("AuthorizeClusterSecurityGroupIngress"); let mut params = params; AuthorizeClusterSecurityGroupIngressMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch( request, AuthorizeClusterSecurityGroupIngressError::from_response, ) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = AuthorizeClusterSecurityGroupIngressResultDeserializer::deserialize( "AuthorizeClusterSecurityGroupIngressResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Authorizes the specified AWS customer account to restore the specified snapshot.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

async fn authorize_snapshot_access( &self, input: AuthorizeSnapshotAccessMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("AuthorizeSnapshotAccess"); let mut params = params; AuthorizeSnapshotAccessMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, AuthorizeSnapshotAccessError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = AuthorizeSnapshotAccessResultDeserializer::deserialize( "AuthorizeSnapshotAccessResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Deletes a set of cluster snapshots.

async fn batch_delete_cluster_snapshots( &self, input: BatchDeleteClusterSnapshotsRequest, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("BatchDeleteClusterSnapshots"); let mut params = params; BatchDeleteClusterSnapshotsRequestSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, BatchDeleteClusterSnapshotsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = BatchDeleteClusterSnapshotsResultDeserializer::deserialize( "BatchDeleteClusterSnapshotsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Modifies the settings for a set of cluster snapshots.

async fn batch_modify_cluster_snapshots( &self, input: BatchModifyClusterSnapshotsMessage, ) -> Result< BatchModifyClusterSnapshotsOutputMessage, RusotoError, > { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("BatchModifyClusterSnapshots"); let mut params = params; BatchModifyClusterSnapshotsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, BatchModifyClusterSnapshotsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = BatchModifyClusterSnapshotsOutputMessageDeserializer::deserialize( "BatchModifyClusterSnapshotsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Cancels a resize operation for a cluster.

async fn cancel_resize( &self, input: CancelResizeMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CancelResize"); let mut params = params; CancelResizeMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CancelResizeError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ResizeProgressMessageDeserializer::deserialize("CancelResizeResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Copies the specified automated cluster snapshot to a new manual cluster snapshot. The source must be an automated snapshot and it must be in the available state.

When you delete a cluster, Amazon Redshift deletes any automated snapshots of the cluster. Also, when the retention period of the snapshot expires, Amazon Redshift automatically deletes it. If you want to keep an automated snapshot for a longer period, you can make a manual copy of the snapshot. Manual snapshots are retained until you delete them.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

async fn copy_cluster_snapshot( &self, input: CopyClusterSnapshotMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CopyClusterSnapshot"); let mut params = params; CopyClusterSnapshotMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CopyClusterSnapshotError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = CopyClusterSnapshotResultDeserializer::deserialize( "CopyClusterSnapshotResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Creates a new cluster with the specified parameters.

To create a cluster in Virtual Private Cloud (VPC), you must provide a cluster subnet group name. The cluster subnet group identifies the subnets of your VPC that Amazon Redshift uses when creating the cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

async fn create_cluster( &self, input: CreateClusterMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CreateCluster"); let mut params = params; CreateClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CreateClusterError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = CreateClusterResultDeserializer::deserialize("CreateClusterResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Creates an Amazon Redshift parameter group.

Creating parameter groups is independent of creating clusters. You can associate a cluster with a parameter group when you create the cluster. You can also associate an existing cluster with a parameter group after the cluster is created by using ModifyCluster.

Parameters in the parameter group define specific behavior that applies to the databases you create on the cluster. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

async fn create_cluster_parameter_group( &self, input: CreateClusterParameterGroupMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CreateClusterParameterGroup"); let mut params = params; CreateClusterParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CreateClusterParameterGroupError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = CreateClusterParameterGroupResultDeserializer::deserialize( "CreateClusterParameterGroupResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Creates a new Amazon Redshift security group. You use security groups to control access to non-VPC clusters.

For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

async fn create_cluster_security_group( &self, input: CreateClusterSecurityGroupMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CreateClusterSecurityGroup"); let mut params = params; CreateClusterSecurityGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CreateClusterSecurityGroupError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = CreateClusterSecurityGroupResultDeserializer::deserialize( "CreateClusterSecurityGroupResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Creates a manual snapshot of the specified cluster. The cluster must be in the available state.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

async fn create_cluster_snapshot( &self, input: CreateClusterSnapshotMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CreateClusterSnapshot"); let mut params = params; CreateClusterSnapshotMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CreateClusterSnapshotError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = CreateClusterSnapshotResultDeserializer::deserialize( "CreateClusterSnapshotResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Creates a new Amazon Redshift subnet group. You must provide a list of one or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC) when creating Amazon Redshift subnet group.

For information about subnet groups, go to Amazon Redshift Cluster Subnet Groups in the Amazon Redshift Cluster Management Guide.

async fn create_cluster_subnet_group( &self, input: CreateClusterSubnetGroupMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CreateClusterSubnetGroup"); let mut params = params; CreateClusterSubnetGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CreateClusterSubnetGroupError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = CreateClusterSubnetGroupResultDeserializer::deserialize( "CreateClusterSubnetGroupResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Creates an Amazon Redshift event notification subscription. This action requires an ARN (Amazon Resource Name) of an Amazon SNS topic created by either the Amazon Redshift console, the Amazon SNS console, or the Amazon SNS API. To obtain an ARN with Amazon SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

You can specify the source type, and lists of Amazon Redshift source IDs, event categories, and event severities. Notifications will be sent for all events you want that match those criteria. For example, you can specify source type = cluster, source ID = my-cluster-1 and mycluster2, event categories = Availability, Backup, and severity = ERROR. The subscription will only send notifications for those ERROR events in the Availability and Backup categories for the specified clusters.

If you specify both the source type and source IDs, such as source type = cluster and source identifier = my-cluster-1, notifications will be sent for all the cluster events for my-cluster-1. If you specify a source type but do not specify a source identifier, you will receive notice of the events for the objects of that type in your AWS account. If you do not specify either the SourceType nor the SourceIdentifier, you will be notified of events generated from all Amazon Redshift sources belonging to your AWS account. You must specify a source type if you specify a source ID.

async fn create_event_subscription( &self, input: CreateEventSubscriptionMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CreateEventSubscription"); let mut params = params; CreateEventSubscriptionMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CreateEventSubscriptionError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = CreateEventSubscriptionResultDeserializer::deserialize( "CreateEventSubscriptionResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Creates an HSM client certificate that an Amazon Redshift cluster will use to connect to the client's HSM in order to store and retrieve the keys used to encrypt the cluster databases.

The command returns a public key, which you must store in the HSM. In addition to creating the HSM certificate, you must create an Amazon Redshift HSM configuration that provides a cluster the information needed to store and use encryption keys in the HSM. For more information, go to Hardware Security Modules in the Amazon Redshift Cluster Management Guide.

async fn create_hsm_client_certificate( &self, input: CreateHsmClientCertificateMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CreateHsmClientCertificate"); let mut params = params; CreateHsmClientCertificateMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CreateHsmClientCertificateError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = CreateHsmClientCertificateResultDeserializer::deserialize( "CreateHsmClientCertificateResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Creates an HSM configuration that contains the information required by an Amazon Redshift cluster to store and use database encryption keys in a Hardware Security Module (HSM). After creating the HSM configuration, you can specify it as a parameter when creating a cluster. The cluster will then store its encryption keys in the HSM.

In addition to creating an HSM configuration, you must also create an HSM client certificate. For more information, go to Hardware Security Modules in the Amazon Redshift Cluster Management Guide.

async fn create_hsm_configuration( &self, input: CreateHsmConfigurationMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CreateHsmConfiguration"); let mut params = params; CreateHsmConfigurationMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CreateHsmConfigurationError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = CreateHsmConfigurationResultDeserializer::deserialize( "CreateHsmConfigurationResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Creates a scheduled action. A scheduled action contains a schedule and an Amazon Redshift API action. For example, you can create a schedule of when to run the ResizeCluster API operation.

async fn create_scheduled_action( &self, input: CreateScheduledActionMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CreateScheduledAction"); let mut params = params; CreateScheduledActionMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CreateScheduledActionError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ScheduledActionDeserializer::deserialize("CreateScheduledActionResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Creates a snapshot copy grant that permits Amazon Redshift to use a customer master key (CMK) from AWS Key Management Service (AWS KMS) to encrypt copied snapshots in a destination region.

For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

async fn create_snapshot_copy_grant( &self, input: CreateSnapshotCopyGrantMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CreateSnapshotCopyGrant"); let mut params = params; CreateSnapshotCopyGrantMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CreateSnapshotCopyGrantError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = CreateSnapshotCopyGrantResultDeserializer::deserialize( "CreateSnapshotCopyGrantResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Create a snapshot schedule that can be associated to a cluster and which overrides the default system backup schedule.

async fn create_snapshot_schedule( &self, input: CreateSnapshotScheduleMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CreateSnapshotSchedule"); let mut params = params; CreateSnapshotScheduleMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CreateSnapshotScheduleError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = SnapshotScheduleDeserializer::deserialize("CreateSnapshotScheduleResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Adds tags to a cluster.

A resource can have up to 50 tags. If you try to create more than 50 tags for a resource, you will receive an error and the attempt will fail.

If you specify a key that already exists for the resource, the value for that key will be updated with the new value.

async fn create_tags( &self, input: CreateTagsMessage, ) -> Result<(), RusotoError> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CreateTags"); let mut params = params; CreateTagsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CreateTagsError::from_response) .await?; std::mem::drop(response); Ok(()) } ///

Creates a usage limit for a specified Amazon Redshift feature on a cluster. The usage limit is identified by the returned usage limit identifier.

async fn create_usage_limit( &self, input: CreateUsageLimitMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("CreateUsageLimit"); let mut params = params; CreateUsageLimitMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, CreateUsageLimitError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = UsageLimitDeserializer::deserialize("CreateUsageLimitResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Deletes a previously provisioned cluster without its final snapshot being created. A successful response from the web service indicates that the request was received correctly. Use DescribeClusters to monitor the status of the deletion. The delete operation cannot be canceled or reverted once submitted. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

If you want to shut down the cluster and retain it for future use, set SkipFinalClusterSnapshot to false and specify a name for FinalClusterSnapshotIdentifier. You can later restore this snapshot to resume using the cluster. If a final cluster snapshot is requested, the status of the cluster will be "final-snapshot" while the snapshot is being taken, then it's "deleting" once Amazon Redshift begins deleting the cluster.

For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

async fn delete_cluster( &self, input: DeleteClusterMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DeleteCluster"); let mut params = params; DeleteClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DeleteClusterError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = DeleteClusterResultDeserializer::deserialize("DeleteClusterResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Deletes a specified Amazon Redshift parameter group.

You cannot delete a parameter group if it is associated with a cluster.

async fn delete_cluster_parameter_group( &self, input: DeleteClusterParameterGroupMessage, ) -> Result<(), RusotoError> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DeleteClusterParameterGroup"); let mut params = params; DeleteClusterParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DeleteClusterParameterGroupError::from_response) .await?; std::mem::drop(response); Ok(()) } ///

Deletes an Amazon Redshift security group.

You cannot delete a security group that is associated with any clusters. You cannot delete the default security group.

For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

async fn delete_cluster_security_group( &self, input: DeleteClusterSecurityGroupMessage, ) -> Result<(), RusotoError> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DeleteClusterSecurityGroup"); let mut params = params; DeleteClusterSecurityGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DeleteClusterSecurityGroupError::from_response) .await?; std::mem::drop(response); Ok(()) } ///

Deletes the specified manual snapshot. The snapshot must be in the available state, with no other users authorized to access the snapshot.

Unlike automated snapshots, manual snapshots are retained even after you delete your cluster. Amazon Redshift does not delete your manual snapshots. You must delete manual snapshot explicitly to avoid getting charged. If other accounts are authorized to access the snapshot, you must revoke all of the authorizations before you can delete the snapshot.

async fn delete_cluster_snapshot( &self, input: DeleteClusterSnapshotMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DeleteClusterSnapshot"); let mut params = params; DeleteClusterSnapshotMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DeleteClusterSnapshotError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = DeleteClusterSnapshotResultDeserializer::deserialize( "DeleteClusterSnapshotResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Deletes the specified cluster subnet group.

async fn delete_cluster_subnet_group( &self, input: DeleteClusterSubnetGroupMessage, ) -> Result<(), RusotoError> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DeleteClusterSubnetGroup"); let mut params = params; DeleteClusterSubnetGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DeleteClusterSubnetGroupError::from_response) .await?; std::mem::drop(response); Ok(()) } ///

Deletes an Amazon Redshift event notification subscription.

async fn delete_event_subscription( &self, input: DeleteEventSubscriptionMessage, ) -> Result<(), RusotoError> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DeleteEventSubscription"); let mut params = params; DeleteEventSubscriptionMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DeleteEventSubscriptionError::from_response) .await?; std::mem::drop(response); Ok(()) } ///

Deletes the specified HSM client certificate.

async fn delete_hsm_client_certificate( &self, input: DeleteHsmClientCertificateMessage, ) -> Result<(), RusotoError> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DeleteHsmClientCertificate"); let mut params = params; DeleteHsmClientCertificateMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DeleteHsmClientCertificateError::from_response) .await?; std::mem::drop(response); Ok(()) } ///

Deletes the specified Amazon Redshift HSM configuration.

async fn delete_hsm_configuration( &self, input: DeleteHsmConfigurationMessage, ) -> Result<(), RusotoError> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DeleteHsmConfiguration"); let mut params = params; DeleteHsmConfigurationMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DeleteHsmConfigurationError::from_response) .await?; std::mem::drop(response); Ok(()) } ///

Deletes a scheduled action.

async fn delete_scheduled_action( &self, input: DeleteScheduledActionMessage, ) -> Result<(), RusotoError> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DeleteScheduledAction"); let mut params = params; DeleteScheduledActionMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DeleteScheduledActionError::from_response) .await?; std::mem::drop(response); Ok(()) } ///

Deletes the specified snapshot copy grant.

async fn delete_snapshot_copy_grant( &self, input: DeleteSnapshotCopyGrantMessage, ) -> Result<(), RusotoError> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DeleteSnapshotCopyGrant"); let mut params = params; DeleteSnapshotCopyGrantMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DeleteSnapshotCopyGrantError::from_response) .await?; std::mem::drop(response); Ok(()) } ///

Deletes a snapshot schedule.

async fn delete_snapshot_schedule( &self, input: DeleteSnapshotScheduleMessage, ) -> Result<(), RusotoError> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DeleteSnapshotSchedule"); let mut params = params; DeleteSnapshotScheduleMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DeleteSnapshotScheduleError::from_response) .await?; std::mem::drop(response); Ok(()) } ///

Deletes tags from a resource. You must provide the ARN of the resource from which you want to delete the tag or tags.

async fn delete_tags( &self, input: DeleteTagsMessage, ) -> Result<(), RusotoError> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DeleteTags"); let mut params = params; DeleteTagsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DeleteTagsError::from_response) .await?; std::mem::drop(response); Ok(()) } ///

Deletes a usage limit from a cluster.

async fn delete_usage_limit( &self, input: DeleteUsageLimitMessage, ) -> Result<(), RusotoError> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DeleteUsageLimit"); let mut params = params; DeleteUsageLimitMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DeleteUsageLimitError::from_response) .await?; std::mem::drop(response); Ok(()) } ///

Returns a list of attributes attached to an account

async fn describe_account_attributes( &self, input: DescribeAccountAttributesMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeAccountAttributes"); let mut params = params; DescribeAccountAttributesMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeAccountAttributesError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = AccountAttributeListDeserializer::deserialize( "DescribeAccountAttributesResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns an array of ClusterDbRevision objects.

async fn describe_cluster_db_revisions( &self, input: DescribeClusterDbRevisionsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeClusterDbRevisions"); let mut params = params; DescribeClusterDbRevisionsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeClusterDbRevisionsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ClusterDbRevisionsMessageDeserializer::deserialize( "DescribeClusterDbRevisionsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns a list of Amazon Redshift parameter groups, including parameter groups you created and the default parameter group. For each parameter group, the response includes the parameter group name, description, and parameter group family name. You can optionally specify a name to retrieve the description of a specific parameter group.

For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all parameter groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all parameter groups that have any combination of those values are returned.

If both tag keys and values are omitted from the request, parameter groups are returned regardless of whether they have tag keys or values associated with them.

async fn describe_cluster_parameter_groups( &self, input: DescribeClusterParameterGroupsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeClusterParameterGroups"); let mut params = params; DescribeClusterParameterGroupsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeClusterParameterGroupsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ClusterParameterGroupsMessageDeserializer::deserialize( "DescribeClusterParameterGroupsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns a detailed list of parameters contained within the specified Amazon Redshift parameter group. For each parameter the response includes information such as parameter name, description, data type, value, whether the parameter value is modifiable, and so on.

You can specify source filter to retrieve parameters of only specific type. For example, to retrieve parameters that were modified by a user action such as from ModifyClusterParameterGroup, you can specify source equal to user.

For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

async fn describe_cluster_parameters( &self, input: DescribeClusterParametersMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeClusterParameters"); let mut params = params; DescribeClusterParametersMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeClusterParametersError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ClusterParameterGroupDetailsDeserializer::deserialize( "DescribeClusterParametersResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns information about Amazon Redshift security groups. If the name of a security group is specified, the response will contain only information about only that security group.

For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all security groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all security groups that have any combination of those values are returned.

If both tag keys and values are omitted from the request, security groups are returned regardless of whether they have tag keys or values associated with them.

async fn describe_cluster_security_groups( &self, input: DescribeClusterSecurityGroupsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeClusterSecurityGroups"); let mut params = params; DescribeClusterSecurityGroupsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeClusterSecurityGroupsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ClusterSecurityGroupMessageDeserializer::deserialize( "DescribeClusterSecurityGroupsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns one or more snapshot objects, which contain metadata about your cluster snapshots. By default, this operation returns information about all snapshots of all clusters that are owned by you AWS customer account. No information is returned for snapshots owned by inactive AWS customer accounts.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all snapshots that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all snapshots that have any combination of those values are returned. Only snapshots that you own are returned in the response; shared snapshots are not returned with the tag key and tag value request parameters.

If both tag keys and values are omitted from the request, snapshots are returned regardless of whether they have tag keys or values associated with them.

async fn describe_cluster_snapshots( &self, input: DescribeClusterSnapshotsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeClusterSnapshots"); let mut params = params; DescribeClusterSnapshotsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeClusterSnapshotsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = SnapshotMessageDeserializer::deserialize("DescribeClusterSnapshotsResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns one or more cluster subnet group objects, which contain metadata about your cluster subnet groups. By default, this operation returns information about all cluster subnet groups that are defined in your AWS account.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all subnet groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all subnet groups that have any combination of those values are returned.

If both tag keys and values are omitted from the request, subnet groups are returned regardless of whether they have tag keys or values associated with them.

async fn describe_cluster_subnet_groups( &self, input: DescribeClusterSubnetGroupsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeClusterSubnetGroups"); let mut params = params; DescribeClusterSubnetGroupsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeClusterSubnetGroupsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ClusterSubnetGroupMessageDeserializer::deserialize( "DescribeClusterSubnetGroupsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns a list of all the available maintenance tracks.

async fn describe_cluster_tracks( &self, input: DescribeClusterTracksMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeClusterTracks"); let mut params = params; DescribeClusterTracksMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeClusterTracksError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = TrackListMessageDeserializer::deserialize("DescribeClusterTracksResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns descriptions of the available Amazon Redshift cluster versions. You can call this operation even before creating any clusters to learn more about the Amazon Redshift versions. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

async fn describe_cluster_versions( &self, input: DescribeClusterVersionsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeClusterVersions"); let mut params = params; DescribeClusterVersionsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeClusterVersionsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ClusterVersionsMessageDeserializer::deserialize( "DescribeClusterVersionsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns properties of provisioned clusters including general cluster properties, cluster database properties, maintenance and backup properties, and security and access properties. This operation supports pagination. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all clusters that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all clusters that have any combination of those values are returned.

If both tag keys and values are omitted from the request, clusters are returned regardless of whether they have tag keys or values associated with them.

async fn describe_clusters( &self, input: DescribeClustersMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeClusters"); let mut params = params; DescribeClustersMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeClustersError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ClustersMessageDeserializer::deserialize("DescribeClustersResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns a list of parameter settings for the specified parameter group family.

For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

async fn describe_default_cluster_parameters( &self, input: DescribeDefaultClusterParametersMessage, ) -> Result< DescribeDefaultClusterParametersResult, RusotoError, > { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeDefaultClusterParameters"); let mut params = params; DescribeDefaultClusterParametersMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch( request, DescribeDefaultClusterParametersError::from_response, ) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = DescribeDefaultClusterParametersResultDeserializer::deserialize( "DescribeDefaultClusterParametersResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Displays a list of event categories for all event source types, or for a specified source type. For a list of the event categories and source types, go to Amazon Redshift Event Notifications.

async fn describe_event_categories( &self, input: DescribeEventCategoriesMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeEventCategories"); let mut params = params; DescribeEventCategoriesMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeEventCategoriesError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = EventCategoriesMessageDeserializer::deserialize( "DescribeEventCategoriesResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Lists descriptions of all the Amazon Redshift event notification subscriptions for a customer account. If you specify a subscription name, lists the description for that subscription.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all event notification subscriptions that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all subscriptions that have any combination of those values are returned.

If both tag keys and values are omitted from the request, subscriptions are returned regardless of whether they have tag keys or values associated with them.

async fn describe_event_subscriptions( &self, input: DescribeEventSubscriptionsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeEventSubscriptions"); let mut params = params; DescribeEventSubscriptionsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeEventSubscriptionsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = EventSubscriptionsMessageDeserializer::deserialize( "DescribeEventSubscriptionsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns events related to clusters, security groups, snapshots, and parameter groups for the past 14 days. Events specific to a particular cluster, security group, snapshot or parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.

async fn describe_events( &self, input: DescribeEventsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeEvents"); let mut params = params; DescribeEventsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeEventsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = EventsMessageDeserializer::deserialize("DescribeEventsResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns information about the specified HSM client certificate. If no certificate ID is specified, returns information about all the HSM certificates owned by your AWS customer account.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM client certificates that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM client certificates that have any combination of those values are returned.

If both tag keys and values are omitted from the request, HSM client certificates are returned regardless of whether they have tag keys or values associated with them.

async fn describe_hsm_client_certificates( &self, input: DescribeHsmClientCertificatesMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeHsmClientCertificates"); let mut params = params; DescribeHsmClientCertificatesMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeHsmClientCertificatesError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = HsmClientCertificateMessageDeserializer::deserialize( "DescribeHsmClientCertificatesResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns information about the specified Amazon Redshift HSM configuration. If no configuration ID is specified, returns information about all the HSM configurations owned by your AWS customer account.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM connections that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM connections that have any combination of those values are returned.

If both tag keys and values are omitted from the request, HSM connections are returned regardless of whether they have tag keys or values associated with them.

async fn describe_hsm_configurations( &self, input: DescribeHsmConfigurationsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeHsmConfigurations"); let mut params = params; DescribeHsmConfigurationsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeHsmConfigurationsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = HsmConfigurationMessageDeserializer::deserialize( "DescribeHsmConfigurationsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Describes whether information, such as queries and connection attempts, is being logged for the specified Amazon Redshift cluster.

async fn describe_logging_status( &self, input: DescribeLoggingStatusMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeLoggingStatus"); let mut params = params; DescribeLoggingStatusMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeLoggingStatusError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = LoggingStatusDeserializer::deserialize("DescribeLoggingStatusResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns properties of possible node configurations such as node type, number of nodes, and disk usage for the specified action type.

async fn describe_node_configuration_options( &self, input: DescribeNodeConfigurationOptionsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeNodeConfigurationOptions"); let mut params = params; DescribeNodeConfigurationOptionsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch( request, DescribeNodeConfigurationOptionsError::from_response, ) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = NodeConfigurationOptionsMessageDeserializer::deserialize( "DescribeNodeConfigurationOptionsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns a list of orderable cluster options. Before you create a new cluster you can use this operation to find what options are available, such as the EC2 Availability Zones (AZ) in the specific AWS Region that you can specify, and the node types you can request. The node types differ by available storage, memory, CPU and price. With the cost involved you might want to obtain a list of cluster options in the specific region and specify values when creating a cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

async fn describe_orderable_cluster_options( &self, input: DescribeOrderableClusterOptionsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeOrderableClusterOptions"); let mut params = params; DescribeOrderableClusterOptionsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeOrderableClusterOptionsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = OrderableClusterOptionsMessageDeserializer::deserialize( "DescribeOrderableClusterOptionsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns a list of the available reserved node offerings by Amazon Redshift with their descriptions including the node type, the fixed and recurring costs of reserving the node and duration the node will be reserved for you. These descriptions help you determine which reserve node offering you want to purchase. You then use the unique offering ID in your call to PurchaseReservedNodeOffering to reserve one or more nodes for your Amazon Redshift cluster.

For more information about reserved node offerings, go to Purchasing Reserved Nodes in the Amazon Redshift Cluster Management Guide.

async fn describe_reserved_node_offerings( &self, input: DescribeReservedNodeOfferingsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeReservedNodeOfferings"); let mut params = params; DescribeReservedNodeOfferingsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeReservedNodeOfferingsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ReservedNodeOfferingsMessageDeserializer::deserialize( "DescribeReservedNodeOfferingsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns the descriptions of the reserved nodes.

async fn describe_reserved_nodes( &self, input: DescribeReservedNodesMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeReservedNodes"); let mut params = params; DescribeReservedNodesMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeReservedNodesError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ReservedNodesMessageDeserializer::deserialize( "DescribeReservedNodesResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns information about the last resize operation for the specified cluster. If no resize operation has ever been initiated for the specified cluster, an HTTP 404 error is returned. If a resize operation was initiated and completed, the status of the resize remains as SUCCEEDED until the next resize.

A resize operation can be requested using ModifyCluster and specifying a different number or type of nodes for the cluster.

async fn describe_resize( &self, input: DescribeResizeMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeResize"); let mut params = params; DescribeResizeMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeResizeError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ResizeProgressMessageDeserializer::deserialize("DescribeResizeResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Describes properties of scheduled actions.

async fn describe_scheduled_actions( &self, input: DescribeScheduledActionsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeScheduledActions"); let mut params = params; DescribeScheduledActionsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeScheduledActionsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ScheduledActionsMessageDeserializer::deserialize( "DescribeScheduledActionsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns a list of snapshot copy grants owned by the AWS account in the destination region.

For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide.

async fn describe_snapshot_copy_grants( &self, input: DescribeSnapshotCopyGrantsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeSnapshotCopyGrants"); let mut params = params; DescribeSnapshotCopyGrantsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeSnapshotCopyGrantsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = SnapshotCopyGrantMessageDeserializer::deserialize( "DescribeSnapshotCopyGrantsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns a list of snapshot schedules.

async fn describe_snapshot_schedules( &self, input: DescribeSnapshotSchedulesMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeSnapshotSchedules"); let mut params = params; DescribeSnapshotSchedulesMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeSnapshotSchedulesError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = DescribeSnapshotSchedulesOutputMessageDeserializer::deserialize( "DescribeSnapshotSchedulesResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns account level backups storage size and provisional storage.

async fn describe_storage( &self, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeStorage"); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeStorageError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = CustomerStorageMessageDeserializer::deserialize("DescribeStorageResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Lists the status of one or more table restore requests made using the RestoreTableFromClusterSnapshot API action. If you don't specify a value for the TableRestoreRequestId parameter, then DescribeTableRestoreStatus returns the status of all table restore requests ordered by the date and time of the request in ascending order. Otherwise DescribeTableRestoreStatus returns the status of the table specified by TableRestoreRequestId.

async fn describe_table_restore_status( &self, input: DescribeTableRestoreStatusMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeTableRestoreStatus"); let mut params = params; DescribeTableRestoreStatusMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeTableRestoreStatusError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = TableRestoreStatusMessageDeserializer::deserialize( "DescribeTableRestoreStatusResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns a list of tags. You can return tags from a specific resource by specifying an ARN, or you can return all tags for a given type of resource, such as clusters, snapshots, and so on.

The following are limitations for DescribeTags:

  • You cannot specify an ARN and a resource-type value together in the same request.

  • You cannot use the MaxRecords and Marker parameters together with the ARN parameter.

  • The MaxRecords parameter can be a range from 10 to 50 results to return in a request.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all resources that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all resources that have any combination of those values are returned.

If both tag keys and values are omitted from the request, resources are returned regardless of whether they have tag keys or values associated with them.

async fn describe_tags( &self, input: DescribeTagsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeTags"); let mut params = params; DescribeTagsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeTagsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = TaggedResourceListMessageDeserializer::deserialize("DescribeTagsResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Shows usage limits on a cluster. Results are filtered based on the combination of input usage limit identifier, cluster identifier, and feature type parameters:

  • If usage limit identifier, cluster identifier, and feature type are not provided, then all usage limit objects for the current account in the current region are returned.

  • If usage limit identifier is provided, then the corresponding usage limit object is returned.

  • If cluster identifier is provided, then all usage limit objects for the specified cluster are returned.

  • If cluster identifier and feature type are provided, then all usage limit objects for the combination of cluster and feature are returned.

async fn describe_usage_limits( &self, input: DescribeUsageLimitsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DescribeUsageLimits"); let mut params = params; DescribeUsageLimitsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DescribeUsageLimitsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = UsageLimitListDeserializer::deserialize("DescribeUsageLimitsResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Stops logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.

async fn disable_logging( &self, input: DisableLoggingMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DisableLogging"); let mut params = params; DisableLoggingMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DisableLoggingError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = LoggingStatusDeserializer::deserialize("DisableLoggingResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Disables the automatic copying of snapshots from one region to another region for a specified cluster.

If your cluster and its snapshots are encrypted using a customer master key (CMK) from AWS KMS, use DeleteSnapshotCopyGrant to delete the grant that grants Amazon Redshift permission to the CMK in the destination region.

async fn disable_snapshot_copy( &self, input: DisableSnapshotCopyMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("DisableSnapshotCopy"); let mut params = params; DisableSnapshotCopyMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, DisableSnapshotCopyError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = DisableSnapshotCopyResultDeserializer::deserialize( "DisableSnapshotCopyResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Starts logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.

async fn enable_logging( &self, input: EnableLoggingMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("EnableLogging"); let mut params = params; EnableLoggingMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, EnableLoggingError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = LoggingStatusDeserializer::deserialize("EnableLoggingResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Enables the automatic copy of snapshots from one region to another region for a specified cluster.

async fn enable_snapshot_copy( &self, input: EnableSnapshotCopyMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("EnableSnapshotCopy"); let mut params = params; EnableSnapshotCopyMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, EnableSnapshotCopyError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = EnableSnapshotCopyResultDeserializer::deserialize( "EnableSnapshotCopyResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns a database user name and temporary password with temporary authorization to log on to an Amazon Redshift database. The action returns the database user name prefixed with IAM: if AutoCreate is False or IAMA: if AutoCreate is True. You can optionally specify one or more database user groups that the user will join at log on. By default, the temporary credentials expire in 900 seconds. You can optionally specify a duration between 900 seconds (15 minutes) and 3600 seconds (60 minutes). For more information, see Using IAM Authentication to Generate Database User Credentials in the Amazon Redshift Cluster Management Guide.

The AWS Identity and Access Management (IAM) user or role that executes GetClusterCredentials must have an IAM policy attached that allows access to all necessary actions and resources. For more information about permissions, see Resource Policies for GetClusterCredentials in the Amazon Redshift Cluster Management Guide.

If the DbGroups parameter is specified, the IAM policy must allow the redshift:JoinGroup action with access to the listed dbgroups.

In addition, if the AutoCreate parameter is set to True, then the policy must include the redshift:CreateClusterUser privilege.

If the DbName parameter is specified, the IAM policy must allow access to the resource dbname for the specified database name.

async fn get_cluster_credentials( &self, input: GetClusterCredentialsMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("GetClusterCredentials"); let mut params = params; GetClusterCredentialsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, GetClusterCredentialsError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ClusterCredentialsDeserializer::deserialize("GetClusterCredentialsResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Returns an array of DC2 ReservedNodeOfferings that matches the payment type, term, and usage price of the given DC1 reserved node.

async fn get_reserved_node_exchange_offerings( &self, input: GetReservedNodeExchangeOfferingsInputMessage, ) -> Result< GetReservedNodeExchangeOfferingsOutputMessage, RusotoError, > { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("GetReservedNodeExchangeOfferings"); let mut params = params; GetReservedNodeExchangeOfferingsInputMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch( request, GetReservedNodeExchangeOfferingsError::from_response, ) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = GetReservedNodeExchangeOfferingsOutputMessageDeserializer::deserialize( "GetReservedNodeExchangeOfferingsResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Modifies the settings for a cluster.

You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.

You can add another security or parameter group, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

async fn modify_cluster( &self, input: ModifyClusterMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ModifyCluster"); let mut params = params; ModifyClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ModifyClusterError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ModifyClusterResultDeserializer::deserialize("ModifyClusterResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Modifies the database revision of a cluster. The database revision is a unique revision of the database running in a cluster.

async fn modify_cluster_db_revision( &self, input: ModifyClusterDbRevisionMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ModifyClusterDbRevision"); let mut params = params; ModifyClusterDbRevisionMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ModifyClusterDbRevisionError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ModifyClusterDbRevisionResultDeserializer::deserialize( "ModifyClusterDbRevisionResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Modifies the list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.

A cluster can have up to 10 IAM roles associated at any time.

async fn modify_cluster_iam_roles( &self, input: ModifyClusterIamRolesMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ModifyClusterIamRoles"); let mut params = params; ModifyClusterIamRolesMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ModifyClusterIamRolesError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ModifyClusterIamRolesResultDeserializer::deserialize( "ModifyClusterIamRolesResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Modifies the maintenance settings of a cluster.

async fn modify_cluster_maintenance( &self, input: ModifyClusterMaintenanceMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ModifyClusterMaintenance"); let mut params = params; ModifyClusterMaintenanceMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ModifyClusterMaintenanceError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ModifyClusterMaintenanceResultDeserializer::deserialize( "ModifyClusterMaintenanceResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Modifies the parameters of a parameter group.

For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide.

async fn modify_cluster_parameter_group( &self, input: ModifyClusterParameterGroupMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ModifyClusterParameterGroup"); let mut params = params; ModifyClusterParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ModifyClusterParameterGroupError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ClusterParameterGroupNameMessageDeserializer::deserialize( "ModifyClusterParameterGroupResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Modifies the settings for a snapshot.

For example, this operation can modify the manual retention period setting for a cluster snapshot.

async fn modify_cluster_snapshot( &self, input: ModifyClusterSnapshotMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ModifyClusterSnapshot"); let mut params = params; ModifyClusterSnapshotMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ModifyClusterSnapshotError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ModifyClusterSnapshotResultDeserializer::deserialize( "ModifyClusterSnapshotResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Modifies a snapshot schedule for a cluster.

async fn modify_cluster_snapshot_schedule( &self, input: ModifyClusterSnapshotScheduleMessage, ) -> Result<(), RusotoError> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ModifyClusterSnapshotSchedule"); let mut params = params; ModifyClusterSnapshotScheduleMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ModifyClusterSnapshotScheduleError::from_response) .await?; std::mem::drop(response); Ok(()) } ///

Modifies a cluster subnet group to include the specified list of VPC subnets. The operation replaces the existing list of subnets with the new list of subnets.

async fn modify_cluster_subnet_group( &self, input: ModifyClusterSubnetGroupMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ModifyClusterSubnetGroup"); let mut params = params; ModifyClusterSubnetGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ModifyClusterSubnetGroupError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ModifyClusterSubnetGroupResultDeserializer::deserialize( "ModifyClusterSubnetGroupResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Modifies an existing Amazon Redshift event notification subscription.

async fn modify_event_subscription( &self, input: ModifyEventSubscriptionMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ModifyEventSubscription"); let mut params = params; ModifyEventSubscriptionMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ModifyEventSubscriptionError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ModifyEventSubscriptionResultDeserializer::deserialize( "ModifyEventSubscriptionResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Modifies a scheduled action.

async fn modify_scheduled_action( &self, input: ModifyScheduledActionMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ModifyScheduledAction"); let mut params = params; ModifyScheduledActionMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ModifyScheduledActionError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ScheduledActionDeserializer::deserialize("ModifyScheduledActionResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Modifies the number of days to retain snapshots in the destination AWS Region after they are copied from the source AWS Region. By default, this operation only changes the retention period of copied automated snapshots. The retention periods for both new and existing copied automated snapshots are updated with the new retention period. You can set the manual option to change only the retention periods of copied manual snapshots. If you set this option, only newly copied manual snapshots have the new retention period.

async fn modify_snapshot_copy_retention_period( &self, input: ModifySnapshotCopyRetentionPeriodMessage, ) -> Result< ModifySnapshotCopyRetentionPeriodResult, RusotoError, > { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ModifySnapshotCopyRetentionPeriod"); let mut params = params; ModifySnapshotCopyRetentionPeriodMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch( request, ModifySnapshotCopyRetentionPeriodError::from_response, ) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ModifySnapshotCopyRetentionPeriodResultDeserializer::deserialize( "ModifySnapshotCopyRetentionPeriodResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Modifies a snapshot schedule. Any schedule associated with a cluster is modified asynchronously.

async fn modify_snapshot_schedule( &self, input: ModifySnapshotScheduleMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ModifySnapshotSchedule"); let mut params = params; ModifySnapshotScheduleMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ModifySnapshotScheduleError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = SnapshotScheduleDeserializer::deserialize("ModifySnapshotScheduleResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Modifies a usage limit in a cluster. You can't modify the feature type or period of a usage limit.

async fn modify_usage_limit( &self, input: ModifyUsageLimitMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ModifyUsageLimit"); let mut params = params; ModifyUsageLimitMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ModifyUsageLimitError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = UsageLimitDeserializer::deserialize("ModifyUsageLimitResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Pauses a cluster.

async fn pause_cluster( &self, input: PauseClusterMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("PauseCluster"); let mut params = params; PauseClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, PauseClusterError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = PauseClusterResultDeserializer::deserialize("PauseClusterResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Allows you to purchase reserved nodes. Amazon Redshift offers a predefined set of reserved node offerings. You can purchase one or more of the offerings. You can call the DescribeReservedNodeOfferings API to obtain the available reserved node offerings. You can call this API by providing a specific reserved node offering and the number of nodes you want to reserve.

For more information about reserved node offerings, go to Purchasing Reserved Nodes in the Amazon Redshift Cluster Management Guide.

async fn purchase_reserved_node_offering( &self, input: PurchaseReservedNodeOfferingMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("PurchaseReservedNodeOffering"); let mut params = params; PurchaseReservedNodeOfferingMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, PurchaseReservedNodeOfferingError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = PurchaseReservedNodeOfferingResultDeserializer::deserialize( "PurchaseReservedNodeOfferingResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Reboots a cluster. This action is taken as soon as possible. It results in a momentary outage to the cluster, during which the cluster status is set to rebooting. A cluster event is created when the reboot is completed. Any pending cluster modifications (see ModifyCluster) are applied at this reboot. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

async fn reboot_cluster( &self, input: RebootClusterMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("RebootCluster"); let mut params = params; RebootClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, RebootClusterError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = RebootClusterResultDeserializer::deserialize("RebootClusterResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Sets one or more parameters of the specified parameter group to their default values and sets the source values of the parameters to "engine-default". To reset the entire parameter group specify the ResetAllParameters parameter. For parameter changes to take effect you must reboot any associated clusters.

async fn reset_cluster_parameter_group( &self, input: ResetClusterParameterGroupMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ResetClusterParameterGroup"); let mut params = params; ResetClusterParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ResetClusterParameterGroupError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ClusterParameterGroupNameMessageDeserializer::deserialize( "ResetClusterParameterGroupResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions:

  • You can only resize clusters of the following types:

    • dc2.large

    • dc2.8xlarge

    • ds2.xlarge

    • ds2.8xlarge

    • ra3.4xlarge

    • ra3.16xlarge

  • The type of nodes that you add must match the node type for the cluster.

async fn resize_cluster( &self, input: ResizeClusterMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ResizeCluster"); let mut params = params; ResizeClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ResizeClusterError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ResizeClusterResultDeserializer::deserialize("ResizeClusterResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Creates a new cluster from a snapshot. By default, Amazon Redshift creates the resulting cluster with the same configuration as the original cluster from which the snapshot was created, except that the new cluster is created with the default cluster security and parameter groups. After Amazon Redshift creates the cluster, you can use the ModifyCluster API to associate a different security group and different parameter group with the restored cluster. If you are using a DS node type, you can also choose to change to another DS node type of the same size during restore.

If you restore a cluster into a VPC, you must provide a cluster subnet group where you want the cluster restored.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

async fn restore_from_cluster_snapshot( &self, input: RestoreFromClusterSnapshotMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("RestoreFromClusterSnapshot"); let mut params = params; RestoreFromClusterSnapshotMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, RestoreFromClusterSnapshotError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = RestoreFromClusterSnapshotResultDeserializer::deserialize( "RestoreFromClusterSnapshotResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Creates a new table from a table in an Amazon Redshift cluster snapshot. You must create the new table within the Amazon Redshift cluster that the snapshot was taken from.

You cannot use RestoreTableFromClusterSnapshot to restore a table with the same name as an existing table in an Amazon Redshift cluster. That is, you cannot overwrite an existing table in a cluster with a restored table. If you want to replace your original table with a new, restored table, then rename or drop your original table before you call RestoreTableFromClusterSnapshot. When you have renamed your original table, then you can pass the original name of the table as the NewTableName parameter value in the call to RestoreTableFromClusterSnapshot. This way, you can replace the original table with the table created from the snapshot.

async fn restore_table_from_cluster_snapshot( &self, input: RestoreTableFromClusterSnapshotMessage, ) -> Result< RestoreTableFromClusterSnapshotResult, RusotoError, > { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("RestoreTableFromClusterSnapshot"); let mut params = params; RestoreTableFromClusterSnapshotMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, RestoreTableFromClusterSnapshotError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = RestoreTableFromClusterSnapshotResultDeserializer::deserialize( "RestoreTableFromClusterSnapshotResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Resumes a paused cluster.

async fn resume_cluster( &self, input: ResumeClusterMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("ResumeCluster"); let mut params = params; ResumeClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, ResumeClusterError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = ResumeClusterResultDeserializer::deserialize("ResumeClusterResult", stack)?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Revokes an ingress rule in an Amazon Redshift security group for a previously authorized IP range or Amazon EC2 security group. To add an ingress rule, see AuthorizeClusterSecurityGroupIngress. For information about managing security groups, go to Amazon Redshift Cluster Security Groups in the Amazon Redshift Cluster Management Guide.

async fn revoke_cluster_security_group_ingress( &self, input: RevokeClusterSecurityGroupIngressMessage, ) -> Result< RevokeClusterSecurityGroupIngressResult, RusotoError, > { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("RevokeClusterSecurityGroupIngress"); let mut params = params; RevokeClusterSecurityGroupIngressMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch( request, RevokeClusterSecurityGroupIngressError::from_response, ) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = RevokeClusterSecurityGroupIngressResultDeserializer::deserialize( "RevokeClusterSecurityGroupIngressResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Removes the ability of the specified AWS customer account to restore the specified snapshot. If the account is currently restoring the snapshot, the restore will run to completion.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

async fn revoke_snapshot_access( &self, input: RevokeSnapshotAccessMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("RevokeSnapshotAccess"); let mut params = params; RevokeSnapshotAccessMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, RevokeSnapshotAccessError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = RevokeSnapshotAccessResultDeserializer::deserialize( "RevokeSnapshotAccessResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } ///

Rotates the encryption keys for a cluster.

async fn rotate_encryption_key( &self, input: RotateEncryptionKeyMessage, ) -> Result> { let mut request = SignedRequest::new("POST", "redshift", &self.region, "/"); let params = self.new_params("RotateEncryptionKey"); let mut params = params; RotateEncryptionKeyMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some(serde_urlencoded::to_string(¶ms).unwrap())); request.set_content_type("application/x-www-form-urlencoded".to_owned()); let response = self .sign_and_dispatch(request, RotateEncryptionKeyError::from_response) .await?; let mut response = response; let result = xml_util::parse_response(&mut response, |actual_tag_name, stack| { xml_util::start_element(actual_tag_name, stack)?; let result = RotateEncryptionKeyResultDeserializer::deserialize( "RotateEncryptionKeyResult", stack, )?; skip_tree(stack); xml_util::end_element(actual_tag_name, stack)?; Ok(result) }) .await?; drop(response); // parse non-payload Ok(result) } } #[cfg(test)] mod protocol_tests { extern crate rusoto_mock; use self::rusoto_mock::*; use super::*; use rusoto_core::Region as rusoto_region; #[tokio::test] async fn test_parse_valid_redshift_authorize_cluster_security_group_ingress() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-authorize-cluster-security-group-ingress.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = AuthorizeClusterSecurityGroupIngressMessage::default(); let result = client .authorize_cluster_security_group_ingress(request) .await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_copy_cluster_snapshot() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-copy-cluster-snapshot.xml", ); let mock = 
MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = CopyClusterSnapshotMessage::default(); let result = client.copy_cluster_snapshot(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_create_cluster_parameter_group() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-create-cluster-parameter-group.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = CreateClusterParameterGroupMessage::default(); let result = client.create_cluster_parameter_group(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_create_cluster_security_group() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-create-cluster-security-group.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = CreateClusterSecurityGroupMessage::default(); let result = client.create_cluster_security_group(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_create_cluster_snapshot() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-create-cluster-snapshot.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = CreateClusterSnapshotMessage::default(); let result = client.create_cluster_snapshot(request).await; 
assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_create_cluster_subnet_group() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-create-cluster-subnet-group.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = CreateClusterSubnetGroupMessage::default(); let result = client.create_cluster_subnet_group(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_create_cluster() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-create-cluster.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = CreateClusterMessage::default(); let result = client.create_cluster(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_delete_cluster_parameter_group() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-delete-cluster-parameter-group.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DeleteClusterParameterGroupMessage::default(); let result = client.delete_cluster_parameter_group(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_delete_cluster_snapshot() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-delete-cluster-snapshot.xml", ); let mock = 
MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DeleteClusterSnapshotMessage::default(); let result = client.delete_cluster_snapshot(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_delete_cluster() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-delete-cluster.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DeleteClusterMessage::default(); let result = client.delete_cluster(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_describe_cluster_parameter_groups() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-describe-cluster-parameter-groups.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DescribeClusterParameterGroupsMessage::default(); let result = client.describe_cluster_parameter_groups(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_describe_cluster_parameters() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-describe-cluster-parameters.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DescribeClusterParametersMessage::default(); let result = client.describe_cluster_parameters(request).await; assert!(result.is_ok(), "parse error: {:?}", 
result); } #[tokio::test] async fn test_parse_valid_redshift_describe_cluster_security_groups() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-describe-cluster-security-groups.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DescribeClusterSecurityGroupsMessage::default(); let result = client.describe_cluster_security_groups(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_describe_cluster_snapshots() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-describe-cluster-snapshots.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DescribeClusterSnapshotsMessage::default(); let result = client.describe_cluster_snapshots(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_describe_cluster_subnet_groups() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-describe-cluster-subnet-groups.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DescribeClusterSubnetGroupsMessage::default(); let result = client.describe_cluster_subnet_groups(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_describe_cluster_versions() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-describe-cluster-versions.xml", ); let mock = 
MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DescribeClusterVersionsMessage::default(); let result = client.describe_cluster_versions(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_describe_clusters() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-describe-clusters.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DescribeClustersMessage::default(); let result = client.describe_clusters(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_describe_events() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-describe-events.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DescribeEventsMessage::default(); let result = client.describe_events(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_describe_orderable_cluster_options() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-describe-orderable-cluster-options.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DescribeOrderableClusterOptionsMessage::default(); let result = client.describe_orderable_cluster_options(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] 
async fn test_parse_valid_redshift_describe_reserved_node_offerings() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-describe-reserved-node-offerings.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DescribeReservedNodeOfferingsMessage::default(); let result = client.describe_reserved_node_offerings(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_describe_reserved_nodes() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-describe-reserved-nodes.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DescribeReservedNodesMessage::default(); let result = client.describe_reserved_nodes(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_describe_resize() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-describe-resize.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = DescribeResizeMessage::default(); let result = client.describe_resize(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_modify_cluster_parameter_group() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-modify-cluster-parameter-group.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, 
MockCredentialsProvider, rusoto_region::UsEast1); let request = ModifyClusterParameterGroupMessage::default(); let result = client.modify_cluster_parameter_group(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_purchase_reserved_node_offering() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-purchase-reserved-node-offering.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = PurchaseReservedNodeOfferingMessage::default(); let result = client.purchase_reserved_node_offering(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_reboot_cluster() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-reboot-cluster.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = RebootClusterMessage::default(); let result = client.reboot_cluster(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_reset_cluster_parameter_group() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-reset-cluster-parameter-group.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = ResetClusterParameterGroupMessage::default(); let result = client.reset_cluster_parameter_group(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_restore_from_cluster_snapshot() { let 
mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-restore-from-cluster-snapshot.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = RestoreFromClusterSnapshotMessage::default(); let result = client.restore_from_cluster_snapshot(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } #[tokio::test] async fn test_parse_valid_redshift_revoke_cluster_security_group_ingress() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "redshift-revoke-cluster-security-group-ingress.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = RedshiftClient::new_with(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = RevokeClusterSecurityGroupIngressMessage::default(); let result = client.revoke_cluster_security_group_ingress(request).await; assert!(result.is_ok(), "parse error: {:?}", result); } }