diff --git a/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts b/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts index e45111ea4acb..580cfa3ff470 100644 --- a/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts +++ b/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts @@ -526,6 +526,33 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO * Connectivity: "PUBLIC" || "PRIVATE", // required * }, * }, + * SnowflakeDestinationConfiguration: { // SnowflakeDestinationConfiguration + * AccountUrl: "STRING_VALUE", // required + * PrivateKey: "STRING_VALUE", // required + * KeyPassphrase: "STRING_VALUE", + * User: "STRING_VALUE", // required + * Database: "STRING_VALUE", // required + * Schema: "STRING_VALUE", // required + * Table: "STRING_VALUE", // required + * SnowflakeRoleConfiguration: { // SnowflakeRoleConfiguration + * Enabled: true || false, + * SnowflakeRole: "STRING_VALUE", + * }, + * DataLoadingOption: "JSON_MAPPING" || "VARIANT_CONTENT_MAPPING" || "VARIANT_CONTENT_AND_METADATA_MAPPING", + * MetaDataColumnName: "STRING_VALUE", + * ContentColumnName: "STRING_VALUE", + * SnowflakeVpcConfiguration: { // SnowflakeVpcConfiguration + * PrivateLinkVpceId: "STRING_VALUE", // required + * }, + * CloudWatchLoggingOptions: "", + * ProcessingConfiguration: "", + * RoleARN: "STRING_VALUE", // required + * RetryOptions: { // SnowflakeRetryOptions + * DurationInSeconds: Number("int"), + * }, + * S3BackupMode: "FailedDataOnly" || "AllData", + * S3Configuration: "", // required + * }, * }; * const command = new CreateDeliveryStreamCommand(input); * const response = await client.send(command); diff --git a/clients/client-firehose/src/commands/DescribeDeliveryStreamCommand.ts b/clients/client-firehose/src/commands/DescribeDeliveryStreamCommand.ts index 06c38e18e7fb..6b6d830794b3 100644 --- a/clients/client-firehose/src/commands/DescribeDeliveryStreamCommand.ts +++ 
b/clients/client-firehose/src/commands/DescribeDeliveryStreamCommand.ts @@ -462,6 +462,31 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // S3BackupMode: "FailedDataOnly" || "AllData", * // S3DestinationDescription: "", * // }, + * // SnowflakeDestinationDescription: { // SnowflakeDestinationDescription + * // AccountUrl: "STRING_VALUE", + * // User: "STRING_VALUE", + * // Database: "STRING_VALUE", + * // Schema: "STRING_VALUE", + * // Table: "STRING_VALUE", + * // SnowflakeRoleConfiguration: { // SnowflakeRoleConfiguration + * // Enabled: true || false, + * // SnowflakeRole: "STRING_VALUE", + * // }, + * // DataLoadingOption: "JSON_MAPPING" || "VARIANT_CONTENT_MAPPING" || "VARIANT_CONTENT_AND_METADATA_MAPPING", + * // MetaDataColumnName: "STRING_VALUE", + * // ContentColumnName: "STRING_VALUE", + * // SnowflakeVpcConfiguration: { // SnowflakeVpcConfiguration + * // PrivateLinkVpceId: "STRING_VALUE", // required + * // }, + * // CloudWatchLoggingOptions: "", + * // ProcessingConfiguration: "", + * // RoleARN: "STRING_VALUE", + * // RetryOptions: { // SnowflakeRetryOptions + * // DurationInSeconds: Number("int"), + * // }, + * // S3BackupMode: "FailedDataOnly" || "AllData", + * // S3DestinationDescription: "", + * // }, * // AmazonOpenSearchServerlessDestinationDescription: { // AmazonOpenSearchServerlessDestinationDescription * // RoleARN: "STRING_VALUE", * // CollectionEndpoint: "STRING_VALUE", diff --git a/clients/client-firehose/src/commands/UpdateDestinationCommand.ts b/clients/client-firehose/src/commands/UpdateDestinationCommand.ts index aa6478d0297b..4434c742a566 100644 --- a/clients/client-firehose/src/commands/UpdateDestinationCommand.ts +++ b/clients/client-firehose/src/commands/UpdateDestinationCommand.ts @@ -429,6 +429,30 @@ export interface UpdateDestinationCommandOutput extends UpdateDestinationOutput, * ProcessingConfiguration: "", * CloudWatchLoggingOptions: "", * }, + * SnowflakeDestinationUpdate: { // 
SnowflakeDestinationUpdate + * AccountUrl: "STRING_VALUE", + * PrivateKey: "STRING_VALUE", + * KeyPassphrase: "STRING_VALUE", + * User: "STRING_VALUE", + * Database: "STRING_VALUE", + * Schema: "STRING_VALUE", + * Table: "STRING_VALUE", + * SnowflakeRoleConfiguration: { // SnowflakeRoleConfiguration + * Enabled: true || false, + * SnowflakeRole: "STRING_VALUE", + * }, + * DataLoadingOption: "JSON_MAPPING" || "VARIANT_CONTENT_MAPPING" || "VARIANT_CONTENT_AND_METADATA_MAPPING", + * MetaDataColumnName: "STRING_VALUE", + * ContentColumnName: "STRING_VALUE", + * CloudWatchLoggingOptions: "", + * ProcessingConfiguration: "", + * RoleARN: "STRING_VALUE", + * RetryOptions: { // SnowflakeRetryOptions + * DurationInSeconds: Number("int"), + * }, + * S3BackupMode: "FailedDataOnly" || "AllData", + * S3Update: "", + * }, * }; * const command = new UpdateDestinationCommand(input); * const response = await client.send(command); diff --git a/clients/client-firehose/src/models/models_0.ts b/clients/client-firehose/src/models/models_0.ts index 3e2318b38007..bc53466d21df 100644 --- a/clients/client-firehose/src/models/models_0.ts +++ b/clients/client-firehose/src/models/models_0.ts @@ -2596,6 +2596,204 @@ export interface RedshiftDestinationConfiguration { CloudWatchLoggingOptions?: CloudWatchLoggingOptions; } +/** + * @public + * @enum + */ +export const SnowflakeDataLoadingOption = { + JSON_MAPPING: "JSON_MAPPING", + VARIANT_CONTENT_AND_METADATA_MAPPING: "VARIANT_CONTENT_AND_METADATA_MAPPING", + VARIANT_CONTENT_MAPPING: "VARIANT_CONTENT_MAPPING", +} as const; + +/** + * @public + */ +export type SnowflakeDataLoadingOption = (typeof SnowflakeDataLoadingOption)[keyof typeof SnowflakeDataLoadingOption]; + +/** + * @public + *

Specify how long Kinesis Data Firehose retries sending data to Snowflake. + * + * After sending data, Kinesis Data Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Kinesis Data Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. + * + * Every time that Kinesis Data Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. + * + * Even if the retry duration expires, Kinesis Data Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Kinesis Data Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. + * + * If you don't want Kinesis Data Firehose to retry sending data, set this value to 0.

+ */ +export interface SnowflakeRetryOptions { + /** + * @public + *

The time period where Kinesis Data Firehose will retry sending data to Snowflake.

+ */ + DurationInSeconds?: number; +} + +/** + * @public + * @enum + */ +export const SnowflakeS3BackupMode = { + AllData: "AllData", + FailedDataOnly: "FailedDataOnly", +} as const; + +/** + * @public + */ +export type SnowflakeS3BackupMode = (typeof SnowflakeS3BackupMode)[keyof typeof SnowflakeS3BackupMode]; + +/** + * @public + *

Optionally configure a Snowflake role. Otherwise the default user role will be used.

+ */ +export interface SnowflakeRoleConfiguration { + /** + * @public + *

Enable Snowflake role

+ */ + Enabled?: boolean; + + /** + * @public + *

The Snowflake role you wish to configure

+ */ + SnowflakeRole?: string; +} + +/** + * @public + *

Configure a Snowflake VPC

+ */ +export interface SnowflakeVpcConfiguration { + /** + * @public + *

The VPCE ID for Firehose to privately connect with Snowflake. The ID format is + * com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake + *

+ */ + PrivateLinkVpceId: string | undefined; +} + +/** + * @public + *

Configure Snowflake destination

+ */ +export interface SnowflakeDestinationConfiguration { + /** + * @public + *

URL for accessing your Snowflake account. This URL must include your account identifier. + * Note that the protocol (https://) and port number are optional.

+ */ + AccountUrl: string | undefined; + + /** + * @public + *

The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.

+ */ + PrivateKey: string | undefined; + + /** + * @public + *

Passphrase to decrypt the private key when the key is encrypted. For information, see Using Key Pair Authentication & Key Rotation.

+ */ + KeyPassphrase?: string; + + /** + * @public + *

User login name for the Snowflake account.

+ */ + User: string | undefined; + + /** + * @public + *

All data in Snowflake is maintained in databases.

+ */ + Database: string | undefined; + + /** + * @public + *

Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views

+ */ + Schema: string | undefined; + + /** + * @public + *

All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.

+ */ + Table: string | undefined; + + /** + * @public + *

Optionally configure a Snowflake role. Otherwise the default user role will be used.

+ */ + SnowflakeRoleConfiguration?: SnowflakeRoleConfiguration; + + /** + * @public + *

Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.

+ */ + DataLoadingOption?: SnowflakeDataLoadingOption; + + /** + * @public + *

The name of the record metadata column

+ */ + MetaDataColumnName?: string; + + /** + * @public + *

The name of the record content column

+ */ + ContentColumnName?: string; + + /** + * @public + *

The VPCE ID for Firehose to privately connect with Snowflake. The ID format is + * com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake + *

+ */ + SnowflakeVpcConfiguration?: SnowflakeVpcConfiguration; + + /** + * @public + *

Describes the Amazon CloudWatch logging options for your delivery stream.

+ */ + CloudWatchLoggingOptions?: CloudWatchLoggingOptions; + + /** + * @public + *

Describes a data processing configuration.

+ */ + ProcessingConfiguration?: ProcessingConfiguration; + + /** + * @public + *

The Amazon Resource Name (ARN) of the Snowflake role

+ */ + RoleARN: string | undefined; + + /** + * @public + *

The time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.

+ */ + RetryOptions?: SnowflakeRetryOptions; + + /** + * @public + *

Choose an S3 backup mode

+ */ + S3BackupMode?: SnowflakeS3BackupMode; + + /** + * @public + *

Describes the configuration of a destination in Amazon S3.

+ */ + S3Configuration: S3DestinationConfiguration | undefined; +} + /** * @public *

The buffering options. If no value is specified, the default values for Splunk are used.

@@ -2879,6 +3077,12 @@ export interface CreateDeliveryStreamInput { * stream.

*/ MSKSourceConfiguration?: MSKSourceConfiguration; + + /** + * @public + *

Configure Snowflake destination

+ */ + SnowflakeDestinationConfiguration?: SnowflakeDestinationConfiguration; } /** @@ -3508,6 +3712,111 @@ export interface RedshiftDestinationDescription { CloudWatchLoggingOptions?: CloudWatchLoggingOptions; } +/** + * @public + *

Optional Snowflake destination description

+ */ +export interface SnowflakeDestinationDescription { + /** + * @public + *

URL for accessing your Snowflake account. This URL must include your account identifier. + * Note that the protocol (https://) and port number are optional.

+ */ + AccountUrl?: string; + + /** + * @public + *

User login name for the Snowflake account.

+ */ + User?: string; + + /** + * @public + *

All data in Snowflake is maintained in databases.

+ */ + Database?: string; + + /** + * @public + *

Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views

+ */ + Schema?: string; + + /** + * @public + *

All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.

+ */ + Table?: string; + + /** + * @public + *

Optionally configure a Snowflake role. Otherwise the default user role will be used.

+ */ + SnowflakeRoleConfiguration?: SnowflakeRoleConfiguration; + + /** + * @public + *

Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.

+ */ + DataLoadingOption?: SnowflakeDataLoadingOption; + + /** + * @public + *

The name of the record metadata column

+ */ + MetaDataColumnName?: string; + + /** + * @public + *

The name of the record content column

+ */ + ContentColumnName?: string; + + /** + * @public + *

The VPCE ID for Firehose to privately connect with Snowflake. The ID format is + * com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake + *

+ */ + SnowflakeVpcConfiguration?: SnowflakeVpcConfiguration; + + /** + * @public + *

Describes the Amazon CloudWatch logging options for your delivery stream.

+ */ + CloudWatchLoggingOptions?: CloudWatchLoggingOptions; + + /** + * @public + *

Describes a data processing configuration.

+ */ + ProcessingConfiguration?: ProcessingConfiguration; + + /** + * @public + *

The Amazon Resource Name (ARN) of the Snowflake role

+ */ + RoleARN?: string; + + /** + * @public + *

The time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.

+ */ + RetryOptions?: SnowflakeRetryOptions; + + /** + * @public + *

Choose an S3 backup mode

+ */ + S3BackupMode?: SnowflakeS3BackupMode; + + /** + * @public + *

Describes a destination in Amazon S3.

+ */ + S3DestinationDescription?: S3DestinationDescription; +} + /** * @public *

Describes a destination in Splunk.

@@ -3637,6 +3946,12 @@ export interface DestinationDescription { */ HttpEndpointDestinationDescription?: HttpEndpointDestinationDescription; + /** + * @public + *

Optional description for the destination

+ */ + SnowflakeDestinationDescription?: SnowflakeDestinationDescription; + /** * @public *

The destination in the Serverless offering for Amazon OpenSearch Service.

@@ -4569,6 +4884,123 @@ export interface RedshiftDestinationUpdate { CloudWatchLoggingOptions?: CloudWatchLoggingOptions; } +/** + * @public + *

Update to configuration settings

+ */ +export interface SnowflakeDestinationUpdate { + /** + * @public + *

URL for accessing your Snowflake account. This URL must include your account identifier. + * Note that the protocol (https://) and port number are optional.

+ */ + AccountUrl?: string; + + /** + * @public + *

The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.

+ */ + PrivateKey?: string; + + /** + * @public + *

Passphrase to decrypt the private key when the key is encrypted. For information, see Using Key Pair Authentication & Key Rotation.

+ */ + KeyPassphrase?: string; + + /** + * @public + *

User login name for the Snowflake account.

+ */ + User?: string; + + /** + * @public + *

All data in Snowflake is maintained in databases.

+ */ + Database?: string; + + /** + * @public + *

Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views

+ */ + Schema?: string; + + /** + * @public + *

All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.

+ */ + Table?: string; + + /** + * @public + *

Optionally configure a Snowflake role. Otherwise the default user role will be used.

+ */ + SnowflakeRoleConfiguration?: SnowflakeRoleConfiguration; + + /** + * @public + *

JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.

+ */ + DataLoadingOption?: SnowflakeDataLoadingOption; + + /** + * @public + *

The name of the record metadata column

+ */ + MetaDataColumnName?: string; + + /** + * @public + *

The name of the record content column

+ */ + ContentColumnName?: string; + + /** + * @public + *

Describes the Amazon CloudWatch logging options for your delivery stream.

+ */ + CloudWatchLoggingOptions?: CloudWatchLoggingOptions; + + /** + * @public + *

Describes a data processing configuration.

+ */ + ProcessingConfiguration?: ProcessingConfiguration; + + /** + * @public + *

The Amazon Resource Name (ARN) of the Snowflake role

+ */ + RoleARN?: string; + + /** + * @public + *

Specify how long Kinesis Data Firehose retries sending data to Snowflake. + * + * After sending data, Kinesis Data Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Kinesis Data Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. + * + * Every time that Kinesis Data Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. + * + * Even if the retry duration expires, Kinesis Data Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Kinesis Data Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. + * + * If you don't want Kinesis Data Firehose to retry sending data, set this value to 0.

+ */ + RetryOptions?: SnowflakeRetryOptions; + + /** + * @public + *

Choose an S3 backup mode

+ */ + S3BackupMode?: SnowflakeS3BackupMode; + + /** + * @public + *

Describes an update for a destination in Amazon S3.

+ */ + S3Update?: S3DestinationUpdate; +} + /** * @public *

Describes an update for a destination in Splunk.

@@ -4724,6 +5156,12 @@ export interface UpdateDestinationInput { * Service.

*/ AmazonOpenSearchServerlessDestinationUpdate?: AmazonOpenSearchServerlessDestinationUpdate; + + /** + * @public + *

Update to the Snowflake destination configuration settings

+ */ + SnowflakeDestinationUpdate?: SnowflakeDestinationUpdate; } /** @@ -4783,6 +5221,44 @@ export const RedshiftDestinationConfigurationFilterSensitiveLog = (obj: Redshift ...(obj.Password && { Password: SENSITIVE_STRING }), }); +/** + * @internal + */ +export const SnowflakeRoleConfigurationFilterSensitiveLog = (obj: SnowflakeRoleConfiguration): any => ({ + ...obj, + ...(obj.SnowflakeRole && { SnowflakeRole: SENSITIVE_STRING }), +}); + +/** + * @internal + */ +export const SnowflakeVpcConfigurationFilterSensitiveLog = (obj: SnowflakeVpcConfiguration): any => ({ + ...obj, + ...(obj.PrivateLinkVpceId && { PrivateLinkVpceId: SENSITIVE_STRING }), +}); + +/** + * @internal + */ +export const SnowflakeDestinationConfigurationFilterSensitiveLog = (obj: SnowflakeDestinationConfiguration): any => ({ + ...obj, + ...(obj.AccountUrl && { AccountUrl: SENSITIVE_STRING }), + ...(obj.PrivateKey && { PrivateKey: SENSITIVE_STRING }), + ...(obj.KeyPassphrase && { KeyPassphrase: SENSITIVE_STRING }), + ...(obj.User && { User: SENSITIVE_STRING }), + ...(obj.Database && { Database: SENSITIVE_STRING }), + ...(obj.Schema && { Schema: SENSITIVE_STRING }), + ...(obj.Table && { Table: SENSITIVE_STRING }), + ...(obj.SnowflakeRoleConfiguration && { + SnowflakeRoleConfiguration: SnowflakeRoleConfigurationFilterSensitiveLog(obj.SnowflakeRoleConfiguration), + }), + ...(obj.MetaDataColumnName && { MetaDataColumnName: SENSITIVE_STRING }), + ...(obj.ContentColumnName && { ContentColumnName: SENSITIVE_STRING }), + ...(obj.SnowflakeVpcConfiguration && { + SnowflakeVpcConfiguration: SnowflakeVpcConfigurationFilterSensitiveLog(obj.SnowflakeVpcConfiguration), + }), +}); + /** * @internal */ @@ -4798,6 +5274,11 @@ export const CreateDeliveryStreamInputFilterSensitiveLog = (obj: CreateDeliveryS obj.HttpEndpointDestinationConfiguration ), }), + ...(obj.SnowflakeDestinationConfiguration && { + SnowflakeDestinationConfiguration: SnowflakeDestinationConfigurationFilterSensitiveLog( + 
obj.SnowflakeDestinationConfiguration + ), + }), }); /** @@ -4829,6 +5310,26 @@ export const RedshiftDestinationDescriptionFilterSensitiveLog = (obj: RedshiftDe ...(obj.Username && { Username: SENSITIVE_STRING }), }); +/** + * @internal + */ +export const SnowflakeDestinationDescriptionFilterSensitiveLog = (obj: SnowflakeDestinationDescription): any => ({ + ...obj, + ...(obj.AccountUrl && { AccountUrl: SENSITIVE_STRING }), + ...(obj.User && { User: SENSITIVE_STRING }), + ...(obj.Database && { Database: SENSITIVE_STRING }), + ...(obj.Schema && { Schema: SENSITIVE_STRING }), + ...(obj.Table && { Table: SENSITIVE_STRING }), + ...(obj.SnowflakeRoleConfiguration && { + SnowflakeRoleConfiguration: SnowflakeRoleConfigurationFilterSensitiveLog(obj.SnowflakeRoleConfiguration), + }), + ...(obj.MetaDataColumnName && { MetaDataColumnName: SENSITIVE_STRING }), + ...(obj.ContentColumnName && { ContentColumnName: SENSITIVE_STRING }), + ...(obj.SnowflakeVpcConfiguration && { + SnowflakeVpcConfiguration: SnowflakeVpcConfigurationFilterSensitiveLog(obj.SnowflakeVpcConfiguration), + }), +}); + /** * @internal */ @@ -4844,6 +5345,11 @@ export const DestinationDescriptionFilterSensitiveLog = (obj: DestinationDescrip obj.HttpEndpointDestinationDescription ), }), + ...(obj.SnowflakeDestinationDescription && { + SnowflakeDestinationDescription: SnowflakeDestinationDescriptionFilterSensitiveLog( + obj.SnowflakeDestinationDescription + ), + }), }); /** @@ -4888,6 +5394,25 @@ export const RedshiftDestinationUpdateFilterSensitiveLog = (obj: RedshiftDestina ...(obj.Password && { Password: SENSITIVE_STRING }), }); +/** + * @internal + */ +export const SnowflakeDestinationUpdateFilterSensitiveLog = (obj: SnowflakeDestinationUpdate): any => ({ + ...obj, + ...(obj.AccountUrl && { AccountUrl: SENSITIVE_STRING }), + ...(obj.PrivateKey && { PrivateKey: SENSITIVE_STRING }), + ...(obj.KeyPassphrase && { KeyPassphrase: SENSITIVE_STRING }), + ...(obj.User && { User: SENSITIVE_STRING }), + 
...(obj.Database && { Database: SENSITIVE_STRING }), + ...(obj.Schema && { Schema: SENSITIVE_STRING }), + ...(obj.Table && { Table: SENSITIVE_STRING }), + ...(obj.SnowflakeRoleConfiguration && { + SnowflakeRoleConfiguration: SnowflakeRoleConfigurationFilterSensitiveLog(obj.SnowflakeRoleConfiguration), + }), + ...(obj.MetaDataColumnName && { MetaDataColumnName: SENSITIVE_STRING }), + ...(obj.ContentColumnName && { ContentColumnName: SENSITIVE_STRING }), +}); + /** * @internal */ @@ -4899,4 +5424,7 @@ export const UpdateDestinationInputFilterSensitiveLog = (obj: UpdateDestinationI ...(obj.HttpEndpointDestinationUpdate && { HttpEndpointDestinationUpdate: HttpEndpointDestinationUpdateFilterSensitiveLog(obj.HttpEndpointDestinationUpdate), }), + ...(obj.SnowflakeDestinationUpdate && { + SnowflakeDestinationUpdate: SnowflakeDestinationUpdateFilterSensitiveLog(obj.SnowflakeDestinationUpdate), + }), }); diff --git a/clients/client-firehose/src/protocols/Aws_json1_1.ts b/clients/client-firehose/src/protocols/Aws_json1_1.ts index e1395e0e0ca3..2e7226954d95 100644 --- a/clients/client-firehose/src/protocols/Aws_json1_1.ts +++ b/clients/client-firehose/src/protocols/Aws_json1_1.ts @@ -133,6 +133,11 @@ import { SchemaConfiguration, Serializer, ServiceUnavailableException, + SnowflakeDestinationConfiguration, + SnowflakeDestinationUpdate, + SnowflakeRetryOptions, + SnowflakeRoleConfiguration, + SnowflakeVpcConfiguration, SourceDescription, SplunkBufferingHints, SplunkDestinationConfiguration, @@ -1110,6 +1115,7 @@ const se_CreateDeliveryStreamInput = (input: CreateDeliveryStreamInput, context: MSKSourceConfiguration: _json, RedshiftDestinationConfiguration: _json, S3DestinationConfiguration: _json, + SnowflakeDestinationConfiguration: _json, SplunkDestinationConfiguration: _json, Tags: _json, }); @@ -1338,6 +1344,16 @@ const se_Serializer = (input: Serializer, context: __SerdeContext): any => { }); }; +// se_SnowflakeDestinationConfiguration omitted. 
+ +// se_SnowflakeDestinationUpdate omitted. + +// se_SnowflakeRetryOptions omitted. + +// se_SnowflakeRoleConfiguration omitted. + +// se_SnowflakeVpcConfiguration omitted. + // se_SplunkBufferingHints omitted. // se_SplunkDestinationConfiguration omitted. @@ -1377,6 +1393,7 @@ const se_UpdateDestinationInput = (input: UpdateDestinationInput, context: __Ser HttpEndpointDestinationUpdate: _json, RedshiftDestinationUpdate: _json, S3DestinationUpdate: _json, + SnowflakeDestinationUpdate: _json, SplunkDestinationUpdate: _json, }); }; @@ -1474,6 +1491,7 @@ const de_DestinationDescription = (output: any, context: __SerdeContext): Destin HttpEndpointDestinationDescription: _json, RedshiftDestinationDescription: _json, S3DestinationDescription: _json, + SnowflakeDestinationDescription: _json, SplunkDestinationDescription: _json, }) as any; }; @@ -1666,6 +1684,14 @@ const de_Serializer = (output: any, context: __SerdeContext): Serializer => { // de_ServiceUnavailableException omitted. +// de_SnowflakeDestinationDescription omitted. + +// de_SnowflakeRetryOptions omitted. + +// de_SnowflakeRoleConfiguration omitted. + +// de_SnowflakeVpcConfiguration omitted. + /** * deserializeAws_json1_1SourceDescription */ diff --git a/codegen/sdk-codegen/aws-models/firehose.json b/codegen/sdk-codegen/aws-models/firehose.json index 111f8bb4d9da..fbea373af637 100644 --- a/codegen/sdk-codegen/aws-models/firehose.json +++ b/codegen/sdk-codegen/aws-models/firehose.json @@ -1030,6 +1030,12 @@ }, "MSKSourceConfiguration": { "target": "com.amazonaws.firehose#MSKSourceConfiguration" + }, + "SnowflakeDestinationConfiguration": { + "target": "com.amazonaws.firehose#SnowflakeDestinationConfiguration", + "traits": { + "smithy.api#documentation": "

Configure Snowflake destination

" + } } }, "traits": { @@ -1689,6 +1695,12 @@ "smithy.api#documentation": "

Describes the specified HTTP endpoint destination.

" } }, + "SnowflakeDestinationDescription": { + "target": "com.amazonaws.firehose#SnowflakeDestinationDescription", + "traits": { + "smithy.api#documentation": "

Optional description for the destination

" + } + }, "AmazonOpenSearchServerlessDestinationDescription": { "target": "com.amazonaws.firehose#AmazonOpenSearchServerlessDestinationDescription", "traits": { @@ -5784,6 +5796,530 @@ } } }, + "com.amazonaws.firehose#SnowflakeAccountUrl": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 24, + "max": 2048 + }, + "smithy.api#pattern": "^.+?\\.snowflakecomputing\\.com$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.firehose#SnowflakeContentColumnName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.firehose#SnowflakeDataLoadingOption": { + "type": "enum", + "members": { + "JSON_MAPPING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "JSON_MAPPING" + } + }, + "VARIANT_CONTENT_MAPPING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VARIANT_CONTENT_MAPPING" + } + }, + "VARIANT_CONTENT_AND_METADATA_MAPPING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VARIANT_CONTENT_AND_METADATA_MAPPING" + } + } + } + }, + "com.amazonaws.firehose#SnowflakeDatabase": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.firehose#SnowflakeDestinationConfiguration": { + "type": "structure", + "members": { + "AccountUrl": { + "target": "com.amazonaws.firehose#SnowflakeAccountUrl", + "traits": { + "smithy.api#documentation": "

URL for accessing your Snowflake account. This URL must include your account identifier. \n Note that the protocol (https://) and port number are optional.

", + "smithy.api#required": {} + } + }, + "PrivateKey": { + "target": "com.amazonaws.firehose#SnowflakePrivateKey", + "traits": { + "smithy.api#documentation": "

The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.

", + "smithy.api#required": {} + } + }, + "KeyPassphrase": { + "target": "com.amazonaws.firehose#SnowflakeKeyPassphrase", + "traits": { + "smithy.api#documentation": "

Passphrase to decrypt the private key when the key is encrypted. For information, see Using Key Pair Authentication & Key Rotation.

" + } + }, + "User": { + "target": "com.amazonaws.firehose#SnowflakeUser", + "traits": { + "smithy.api#documentation": "

User login name for the Snowflake account.

", + "smithy.api#required": {} + } + }, + "Database": { + "target": "com.amazonaws.firehose#SnowflakeDatabase", + "traits": { + "smithy.api#documentation": "

All data in Snowflake is maintained in databases.

", + "smithy.api#required": {} + } + }, + "Schema": { + "target": "com.amazonaws.firehose#SnowflakeSchema", + "traits": { + "smithy.api#documentation": "

Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views

", + "smithy.api#required": {} + } + }, + "Table": { + "target": "com.amazonaws.firehose#SnowflakeTable", + "traits": { + "smithy.api#documentation": "

All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.

", + "smithy.api#required": {} + } + }, + "SnowflakeRoleConfiguration": { + "target": "com.amazonaws.firehose#SnowflakeRoleConfiguration", + "traits": { + "smithy.api#documentation": "

Optionally configure a Snowflake role. Otherwise the default user role will be used.

" + } + }, + "DataLoadingOption": { + "target": "com.amazonaws.firehose#SnowflakeDataLoadingOption", + "traits": { + "smithy.api#documentation": "

Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.

" + } + }, + "MetaDataColumnName": { + "target": "com.amazonaws.firehose#SnowflakeMetaDataColumnName", + "traits": { + "smithy.api#documentation": "

The name of the record metadata column

" + } + }, + "ContentColumnName": { + "target": "com.amazonaws.firehose#SnowflakeContentColumnName", + "traits": { + "smithy.api#documentation": "

The name of the record content column

" + } + }, + "SnowflakeVpcConfiguration": { + "target": "com.amazonaws.firehose#SnowflakeVpcConfiguration", + "traits": { + "smithy.api#documentation": "

The VPCE ID for Firehose to privately connect with Snowflake. The ID format is\n com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake\n

" + } + }, + "CloudWatchLoggingOptions": { + "target": "com.amazonaws.firehose#CloudWatchLoggingOptions" + }, + "ProcessingConfiguration": { + "target": "com.amazonaws.firehose#ProcessingConfiguration" + }, + "RoleARN": { + "target": "com.amazonaws.firehose#RoleARN", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Snowflake role

", + "smithy.api#required": {} + } + }, + "RetryOptions": { + "target": "com.amazonaws.firehose#SnowflakeRetryOptions", + "traits": { + "smithy.api#documentation": "

The time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.

" + } + }, + "S3BackupMode": { + "target": "com.amazonaws.firehose#SnowflakeS3BackupMode", + "traits": { + "smithy.api#documentation": "

Choose an S3 backup mode

" + } + }, + "S3Configuration": { + "target": "com.amazonaws.firehose#S3DestinationConfiguration", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Configure Snowflake destination

" + } + }, + "com.amazonaws.firehose#SnowflakeDestinationDescription": { + "type": "structure", + "members": { + "AccountUrl": { + "target": "com.amazonaws.firehose#SnowflakeAccountUrl", + "traits": { + "smithy.api#documentation": "

URL for accessing your Snowflake account. This URL must include your account identifier. \n Note that the protocol (https://) and port number are optional.

" + } + }, + "User": { + "target": "com.amazonaws.firehose#SnowflakeUser", + "traits": { + "smithy.api#documentation": "

User login name for the Snowflake account.

" + } + }, + "Database": { + "target": "com.amazonaws.firehose#SnowflakeDatabase", + "traits": { + "smithy.api#documentation": "

All data in Snowflake is maintained in databases.

" + } + }, + "Schema": { + "target": "com.amazonaws.firehose#SnowflakeSchema", + "traits": { + "smithy.api#documentation": "

Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views

" + } + }, + "Table": { + "target": "com.amazonaws.firehose#SnowflakeTable", + "traits": { + "smithy.api#documentation": "

All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.

" + } + }, + "SnowflakeRoleConfiguration": { + "target": "com.amazonaws.firehose#SnowflakeRoleConfiguration", + "traits": { + "smithy.api#documentation": "

Optionally configure a Snowflake role. Otherwise the default user role will be used.

" + } + }, + "DataLoadingOption": { + "target": "com.amazonaws.firehose#SnowflakeDataLoadingOption", + "traits": { + "smithy.api#documentation": "

Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.

" + } + }, + "MetaDataColumnName": { + "target": "com.amazonaws.firehose#SnowflakeMetaDataColumnName", + "traits": { + "smithy.api#documentation": "

The name of the record metadata column

" + } + }, + "ContentColumnName": { + "target": "com.amazonaws.firehose#SnowflakeContentColumnName", + "traits": { + "smithy.api#documentation": "

The name of the record content column

" + } + }, + "SnowflakeVpcConfiguration": { + "target": "com.amazonaws.firehose#SnowflakeVpcConfiguration", + "traits": { + "smithy.api#documentation": "

The VPCE ID for Firehose to privately connect with Snowflake. The ID format is\n com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake\n

" + } + }, + "CloudWatchLoggingOptions": { + "target": "com.amazonaws.firehose#CloudWatchLoggingOptions" + }, + "ProcessingConfiguration": { + "target": "com.amazonaws.firehose#ProcessingConfiguration" + }, + "RoleARN": { + "target": "com.amazonaws.firehose#RoleARN", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Snowflake role

" + } + }, + "RetryOptions": { + "target": "com.amazonaws.firehose#SnowflakeRetryOptions", + "traits": { + "smithy.api#documentation": "

The time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.

" + } + }, + "S3BackupMode": { + "target": "com.amazonaws.firehose#SnowflakeS3BackupMode", + "traits": { + "smithy.api#documentation": "

Choose an S3 backup mode

" + } + }, + "S3DestinationDescription": { + "target": "com.amazonaws.firehose#S3DestinationDescription" + } + }, + "traits": { + "smithy.api#documentation": "

Optional Snowflake destination description

" + } + }, + "com.amazonaws.firehose#SnowflakeDestinationUpdate": { + "type": "structure", + "members": { + "AccountUrl": { + "target": "com.amazonaws.firehose#SnowflakeAccountUrl", + "traits": { + "smithy.api#documentation": "

URL for accessing your Snowflake account. This URL must include your account identifier. \n Note that the protocol (https://) and port number are optional.

" + } + }, + "PrivateKey": { + "target": "com.amazonaws.firehose#SnowflakePrivateKey", + "traits": { + "smithy.api#documentation": "

The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.

" + } + }, + "KeyPassphrase": { + "target": "com.amazonaws.firehose#SnowflakeKeyPassphrase", + "traits": { + "smithy.api#documentation": "

Passphrase to decrypt the private key when the key is encrypted. For information, see Using Key Pair Authentication & Key Rotation.

" + } + }, + "User": { + "target": "com.amazonaws.firehose#SnowflakeUser", + "traits": { + "smithy.api#documentation": "

User login name for the Snowflake account.

" + } + }, + "Database": { + "target": "com.amazonaws.firehose#SnowflakeDatabase", + "traits": { + "smithy.api#documentation": "

All data in Snowflake is maintained in databases.

" + } + }, + "Schema": { + "target": "com.amazonaws.firehose#SnowflakeSchema", + "traits": { + "smithy.api#documentation": "

Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views

" + } + }, + "Table": { + "target": "com.amazonaws.firehose#SnowflakeTable", + "traits": { + "smithy.api#documentation": "

All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.

" + } + }, + "SnowflakeRoleConfiguration": { + "target": "com.amazonaws.firehose#SnowflakeRoleConfiguration", + "traits": { + "smithy.api#documentation": "

Optionally configure a Snowflake role. Otherwise the default user role will be used.

" + } + }, + "DataLoadingOption": { + "target": "com.amazonaws.firehose#SnowflakeDataLoadingOption", + "traits": { + "smithy.api#documentation": "

Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.

" + } + }, + "MetaDataColumnName": { + "target": "com.amazonaws.firehose#SnowflakeMetaDataColumnName", + "traits": { + "smithy.api#documentation": "

The name of the record metadata column

" + } + }, + "ContentColumnName": { + "target": "com.amazonaws.firehose#SnowflakeContentColumnName", + "traits": { + "smithy.api#documentation": "

The name of the record content column

" + } + }, + "CloudWatchLoggingOptions": { + "target": "com.amazonaws.firehose#CloudWatchLoggingOptions" + }, + "ProcessingConfiguration": { + "target": "com.amazonaws.firehose#ProcessingConfiguration" + }, + "RoleARN": { + "target": "com.amazonaws.firehose#RoleARN", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Snowflake role

" + } + }, + "RetryOptions": { + "target": "com.amazonaws.firehose#SnowflakeRetryOptions", + "traits": { + "smithy.api#documentation": "

Specify how long Kinesis Data Firehose retries sending data to Snowflake.\n \n After sending data, Kinesis Data Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Kinesis Data Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket.\n \n Every time that Kinesis Data Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint.\n \n Even if the retry duration expires, Kinesis Data Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Kinesis Data Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired.\n \n If you don't want Kinesis Data Firehose to retry sending data, set this value to 0.

" + } + }, + "S3BackupMode": { + "target": "com.amazonaws.firehose#SnowflakeS3BackupMode", + "traits": { + "smithy.api#documentation": "

Choose an S3 backup mode

" + } + }, + "S3Update": { + "target": "com.amazonaws.firehose#S3DestinationUpdate" + } + }, + "traits": { + "smithy.api#documentation": "

Update to configuration settings

" + } + }, + "com.amazonaws.firehose#SnowflakeKeyPassphrase": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 7, + "max": 255 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.firehose#SnowflakeMetaDataColumnName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.firehose#SnowflakePrivateKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 256, + "max": 4096 + }, + "smithy.api#pattern": "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.firehose#SnowflakePrivateLinkVpceId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 47, + "max": 255 + }, + "smithy.api#pattern": "^([a-zA-Z0-9\\-\\_]+\\.){2,3}vpce\\.[a-zA-Z0-9\\-]*\\.vpce-svc\\-[a-zA-Z0-9\\-]{17}$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.firehose#SnowflakeRetryDurationInSeconds": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 7200 + } + } + }, + "com.amazonaws.firehose#SnowflakeRetryOptions": { + "type": "structure", + "members": { + "DurationInSeconds": { + "target": "com.amazonaws.firehose#SnowflakeRetryDurationInSeconds", + "traits": { + "smithy.api#documentation": "

The time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specify how long Kinesis Data Firehose retries sending data to Snowflake.\n \n After sending data, Kinesis Data Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Kinesis Data Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket.\n \n Every time that Kinesis Data Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint.\n \n Even if the retry duration expires, Kinesis Data Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Kinesis Data Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired.\n \n If you don't want Kinesis Data Firehose to retry sending data, set this value to 0.

" + } + }, + "com.amazonaws.firehose#SnowflakeRole": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.firehose#SnowflakeRoleConfiguration": { + "type": "structure", + "members": { + "Enabled": { + "target": "com.amazonaws.firehose#BooleanObject", + "traits": { + "smithy.api#documentation": "

Enable Snowflake role

" + } + }, + "SnowflakeRole": { + "target": "com.amazonaws.firehose#SnowflakeRole", + "traits": { + "smithy.api#documentation": "

The Snowflake role you wish to configure

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Optionally configure a Snowflake role. Otherwise the default user role will be used.

" + } + }, + "com.amazonaws.firehose#SnowflakeS3BackupMode": { + "type": "enum", + "members": { + "FailedDataOnly": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FailedDataOnly" + } + }, + "AllData": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AllData" + } + } + } + }, + "com.amazonaws.firehose#SnowflakeSchema": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.firehose#SnowflakeTable": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.firehose#SnowflakeUser": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.firehose#SnowflakeVpcConfiguration": { + "type": "structure", + "members": { + "PrivateLinkVpceId": { + "target": "com.amazonaws.firehose#SnowflakePrivateLinkVpceId", + "traits": { + "smithy.api#documentation": "

The VPCE ID for Firehose to privately connect with Snowflake. The ID format is\n com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Configure a Snowflake VPC

" + } + }, "com.amazonaws.firehose#SourceDescription": { "type": "structure", "members": { @@ -6493,6 +7029,12 @@ "traits": { "smithy.api#documentation": "

Describes an update for a destination in the Serverless offering for Amazon OpenSearch\n Service.

" } + }, + "SnowflakeDestinationUpdate": { + "target": "com.amazonaws.firehose#SnowflakeDestinationUpdate", + "traits": { + "smithy.api#documentation": "

Update to the Snowflake destination configuration settings

" + } } }, "traits": {