airbyte.DestinationAwsDatalake
DestinationAwsDatalake Resource
Example Usage
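A Python version of this example is sketched below; it mirrors the Java and YAML programs that follow, using the same placeholder values.

import pulumi
import pulumi_airbyte as airbyte

my_destination_awsdatalake = airbyte.DestinationAwsDatalake("myDestinationAwsdatalake",
    configuration={
        "aws_account_id": "111111111111",
        "bucket_name": "...my_bucket_name...",
        "bucket_prefix": "...my_bucket_prefix...",
        "credentials": {
            "iam_role": {
                "role_arn": "...my_role_arn...",
            },
            "iam_user": {
                "aws_access_key_id": "...my_aws_access_key_id...",
                "aws_secret_access_key": "...my_aws_secret_access_key...",
            },
        },
        "format": {
            "json_lines_newline_delimited_json": {
                "compression_codec": "UNCOMPRESSED",
                "format_type": "JSONL",
            },
            "parquet_columnar_storage": {
                "compression_codec": "GZIP",
                "format_type": "Parquet",
            },
        },
        "glue_catalog_float_as_decimal": True,
        "lakeformation_database_default_tag_key": "pii_level",
        "lakeformation_database_default_tag_values": "private,public",
        "lakeformation_database_name": "...my_lakeformation_database_name...",
        "lakeformation_governed_tables": True,
        "partitioning": "DAY",
        "region": "ap-southeast-4",
    },
    definition_id="aa9c2d01-84b7-4474-ba6f-e45dbbc28cdd",
    workspace_id="3df68150-9956-454d-8144-1645f409cdd1")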
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.airbyte.DestinationAwsDatalake;
import com.pulumi.airbyte.DestinationAwsDatalakeArgs;
import com.pulumi.airbyte.inputs.DestinationAwsDatalakeConfigurationArgs;
import com.pulumi.airbyte.inputs.DestinationAwsDatalakeConfigurationCredentialsArgs;
import com.pulumi.airbyte.inputs.DestinationAwsDatalakeConfigurationCredentialsIamRoleArgs;
import com.pulumi.airbyte.inputs.DestinationAwsDatalakeConfigurationCredentialsIamUserArgs;
import com.pulumi.airbyte.inputs.DestinationAwsDatalakeConfigurationFormatArgs;
import com.pulumi.airbyte.inputs.DestinationAwsDatalakeConfigurationFormatJsonLinesNewlineDelimitedJsonArgs;
import com.pulumi.airbyte.inputs.DestinationAwsDatalakeConfigurationFormatParquetColumnarStorageArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var myDestinationAwsdatalake = new DestinationAwsDatalake("myDestinationAwsdatalake", DestinationAwsDatalakeArgs.builder()
            .configuration(DestinationAwsDatalakeConfigurationArgs.builder()
                .awsAccountId("111111111111")
                .bucketName("...my_bucket_name...")
                .bucketPrefix("...my_bucket_prefix...")
                .credentials(DestinationAwsDatalakeConfigurationCredentialsArgs.builder()
                    .iamRole(DestinationAwsDatalakeConfigurationCredentialsIamRoleArgs.builder()
                        .roleArn("...my_role_arn...")
                        .build())
                    .iamUser(DestinationAwsDatalakeConfigurationCredentialsIamUserArgs.builder()
                        .awsAccessKeyId("...my_aws_access_key_id...")
                        .awsSecretAccessKey("...my_aws_secret_access_key...")
                        .build())
                    .build())
                .format(DestinationAwsDatalakeConfigurationFormatArgs.builder()
                    .jsonLinesNewlineDelimitedJson(DestinationAwsDatalakeConfigurationFormatJsonLinesNewlineDelimitedJsonArgs.builder()
                        .compressionCodec("UNCOMPRESSED")
                        .formatType("JSONL")
                        .build())
                    .parquetColumnarStorage(DestinationAwsDatalakeConfigurationFormatParquetColumnarStorageArgs.builder()
                        .compressionCodec("GZIP")
                        .formatType("Parquet")
                        .build())
                    .build())
                .glueCatalogFloatAsDecimal(true)
                .lakeformationDatabaseDefaultTagKey("pii_level")
                .lakeformationDatabaseDefaultTagValues("private,public")
                .lakeformationDatabaseName("...my_lakeformation_database_name...")
                .lakeformationGovernedTables(true)
                .partitioning("DAY")
                .region("ap-southeast-4")
                .build())
            .definitionId("aa9c2d01-84b7-4474-ba6f-e45dbbc28cdd")
            .workspaceId("3df68150-9956-454d-8144-1645f409cdd1")
            .build());
    }
}
resources:
  myDestinationAwsdatalake:
    type: airbyte:DestinationAwsDatalake
    properties:
      configuration:
        aws_account_id: '111111111111'
        bucket_name: '...my_bucket_name...'
        bucket_prefix: '...my_bucket_prefix...'
        credentials:
          iamRole:
            roleArn: '...my_role_arn...'
          iamUser:
            awsAccessKeyId: '...my_aws_access_key_id...'
            awsSecretAccessKey: '...my_aws_secret_access_key...'
        format:
          jsonLinesNewlineDelimitedJson:
            compressionCodec: UNCOMPRESSED
            formatType: JSONL
          parquetColumnarStorage:
            compressionCodec: GZIP
            formatType: Parquet
        glue_catalog_float_as_decimal: true
        lakeformation_database_default_tag_key: pii_level
        lakeformation_database_default_tag_values: private,public
        lakeformation_database_name: '...my_lakeformation_database_name...'
        lakeformation_governed_tables: true
        partitioning: DAY
        region: ap-southeast-4
      definitionId: aa9c2d01-84b7-4474-ba6f-e45dbbc28cdd
      workspaceId: 3df68150-9956-454d-8144-1645f409cdd1
Create DestinationAwsDatalake Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new DestinationAwsDatalake(name: string, args: DestinationAwsDatalakeArgs, opts?: CustomResourceOptions);
@overload
def DestinationAwsDatalake(resource_name: str,
                           args: DestinationAwsDatalakeArgs,
                           opts: Optional[ResourceOptions] = None)
@overload
def DestinationAwsDatalake(resource_name: str,
                           opts: Optional[ResourceOptions] = None,
                           configuration: Optional[DestinationAwsDatalakeConfigurationArgs] = None,
                           workspace_id: Optional[str] = None,
                           definition_id: Optional[str] = None,
                           name: Optional[str] = None)
func NewDestinationAwsDatalake(ctx *Context, name string, args DestinationAwsDatalakeArgs, opts ...ResourceOption) (*DestinationAwsDatalake, error)
public DestinationAwsDatalake(string name, DestinationAwsDatalakeArgs args, CustomResourceOptions? opts = null)
public DestinationAwsDatalake(String name, DestinationAwsDatalakeArgs args)
public DestinationAwsDatalake(String name, DestinationAwsDatalakeArgs args, CustomResourceOptions options)
type: airbyte:DestinationAwsDatalake
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args DestinationAwsDatalakeArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args DestinationAwsDatalakeArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args DestinationAwsDatalakeArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args DestinationAwsDatalakeArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args DestinationAwsDatalakeArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var destinationAwsDatalakeResource = new Airbyte.DestinationAwsDatalake("destinationAwsDatalakeResource", new()
{
    Configuration = new Airbyte.Inputs.DestinationAwsDatalakeConfigurationArgs
    {
        BucketName = "string",
        Credentials = new Airbyte.Inputs.DestinationAwsDatalakeConfigurationCredentialsArgs
        {
            IamRole = new Airbyte.Inputs.DestinationAwsDatalakeConfigurationCredentialsIamRoleArgs
            {
                RoleArn = "string",
            },
            IamUser = new Airbyte.Inputs.DestinationAwsDatalakeConfigurationCredentialsIamUserArgs
            {
                AwsAccessKeyId = "string",
                AwsSecretAccessKey = "string",
            },
        },
        LakeformationDatabaseName = "string",
        AwsAccountId = "string",
        BucketPrefix = "string",
        Format = new Airbyte.Inputs.DestinationAwsDatalakeConfigurationFormatArgs
        {
            JsonLinesNewlineDelimitedJson = new Airbyte.Inputs.DestinationAwsDatalakeConfigurationFormatJsonLinesNewlineDelimitedJsonArgs
            {
                CompressionCodec = "string",
                FormatType = "string",
            },
            ParquetColumnarStorage = new Airbyte.Inputs.DestinationAwsDatalakeConfigurationFormatParquetColumnarStorageArgs
            {
                CompressionCodec = "string",
                FormatType = "string",
            },
        },
        GlueCatalogFloatAsDecimal = false,
        LakeformationDatabaseDefaultTagKey = "string",
        LakeformationDatabaseDefaultTagValues = "string",
        LakeformationGovernedTables = false,
        Partitioning = "string",
        Region = "string",
    },
    WorkspaceId = "string",
    DefinitionId = "string",
    Name = "string",
});
example, err := airbyte.NewDestinationAwsDatalake(ctx, "destinationAwsDatalakeResource", &airbyte.DestinationAwsDatalakeArgs{
    Configuration: &airbyte.DestinationAwsDatalakeConfigurationArgs{
        BucketName: pulumi.String("string"),
        Credentials: &airbyte.DestinationAwsDatalakeConfigurationCredentialsArgs{
            IamRole: &airbyte.DestinationAwsDatalakeConfigurationCredentialsIamRoleArgs{
                RoleArn: pulumi.String("string"),
            },
            IamUser: &airbyte.DestinationAwsDatalakeConfigurationCredentialsIamUserArgs{
                AwsAccessKeyId:     pulumi.String("string"),
                AwsSecretAccessKey: pulumi.String("string"),
            },
        },
        LakeformationDatabaseName: pulumi.String("string"),
        AwsAccountId:              pulumi.String("string"),
        BucketPrefix:              pulumi.String("string"),
        Format: &airbyte.DestinationAwsDatalakeConfigurationFormatArgs{
            JsonLinesNewlineDelimitedJson: &airbyte.DestinationAwsDatalakeConfigurationFormatJsonLinesNewlineDelimitedJsonArgs{
                CompressionCodec: pulumi.String("string"),
                FormatType:       pulumi.String("string"),
            },
            ParquetColumnarStorage: &airbyte.DestinationAwsDatalakeConfigurationFormatParquetColumnarStorageArgs{
                CompressionCodec: pulumi.String("string"),
                FormatType:       pulumi.String("string"),
            },
        },
        GlueCatalogFloatAsDecimal:             pulumi.Bool(false),
        LakeformationDatabaseDefaultTagKey:    pulumi.String("string"),
        LakeformationDatabaseDefaultTagValues: pulumi.String("string"),
        LakeformationGovernedTables:           pulumi.Bool(false),
        Partitioning:                          pulumi.String("string"),
        Region:                                pulumi.String("string"),
    },
    WorkspaceId:  pulumi.String("string"),
    DefinitionId: pulumi.String("string"),
    Name:         pulumi.String("string"),
})
var destinationAwsDatalakeResource = new DestinationAwsDatalake("destinationAwsDatalakeResource", DestinationAwsDatalakeArgs.builder()
    .configuration(DestinationAwsDatalakeConfigurationArgs.builder()
        .bucketName("string")
        .credentials(DestinationAwsDatalakeConfigurationCredentialsArgs.builder()
            .iamRole(DestinationAwsDatalakeConfigurationCredentialsIamRoleArgs.builder()
                .roleArn("string")
                .build())
            .iamUser(DestinationAwsDatalakeConfigurationCredentialsIamUserArgs.builder()
                .awsAccessKeyId("string")
                .awsSecretAccessKey("string")
                .build())
            .build())
        .lakeformationDatabaseName("string")
        .awsAccountId("string")
        .bucketPrefix("string")
        .format(DestinationAwsDatalakeConfigurationFormatArgs.builder()
            .jsonLinesNewlineDelimitedJson(DestinationAwsDatalakeConfigurationFormatJsonLinesNewlineDelimitedJsonArgs.builder()
                .compressionCodec("string")
                .formatType("string")
                .build())
            .parquetColumnarStorage(DestinationAwsDatalakeConfigurationFormatParquetColumnarStorageArgs.builder()
                .compressionCodec("string")
                .formatType("string")
                .build())
            .build())
        .glueCatalogFloatAsDecimal(false)
        .lakeformationDatabaseDefaultTagKey("string")
        .lakeformationDatabaseDefaultTagValues("string")
        .lakeformationGovernedTables(false)
        .partitioning("string")
        .region("string")
        .build())
    .workspaceId("string")
    .definitionId("string")
    .name("string")
    .build());
destination_aws_datalake_resource = airbyte.DestinationAwsDatalake("destinationAwsDatalakeResource",
    configuration={
        "bucket_name": "string",
        "credentials": {
            "iam_role": {
                "role_arn": "string",
            },
            "iam_user": {
                "aws_access_key_id": "string",
                "aws_secret_access_key": "string",
            },
        },
        "lakeformation_database_name": "string",
        "aws_account_id": "string",
        "bucket_prefix": "string",
        "format": {
            "json_lines_newline_delimited_json": {
                "compression_codec": "string",
                "format_type": "string",
            },
            "parquet_columnar_storage": {
                "compression_codec": "string",
                "format_type": "string",
            },
        },
        "glue_catalog_float_as_decimal": False,
        "lakeformation_database_default_tag_key": "string",
        "lakeformation_database_default_tag_values": "string",
        "lakeformation_governed_tables": False,
        "partitioning": "string",
        "region": "string",
    },
    workspace_id="string",
    definition_id="string",
    name="string")
const destinationAwsDatalakeResource = new airbyte.DestinationAwsDatalake("destinationAwsDatalakeResource", {
    configuration: {
        bucketName: "string",
        credentials: {
            iamRole: {
                roleArn: "string",
            },
            iamUser: {
                awsAccessKeyId: "string",
                awsSecretAccessKey: "string",
            },
        },
        lakeformationDatabaseName: "string",
        awsAccountId: "string",
        bucketPrefix: "string",
        format: {
            jsonLinesNewlineDelimitedJson: {
                compressionCodec: "string",
                formatType: "string",
            },
            parquetColumnarStorage: {
                compressionCodec: "string",
                formatType: "string",
            },
        },
        glueCatalogFloatAsDecimal: false,
        lakeformationDatabaseDefaultTagKey: "string",
        lakeformationDatabaseDefaultTagValues: "string",
        lakeformationGovernedTables: false,
        partitioning: "string",
        region: "string",
    },
    workspaceId: "string",
    definitionId: "string",
    name: "string",
});
type: airbyte:DestinationAwsDatalake
properties:
    configuration:
        awsAccountId: string
        bucketName: string
        bucketPrefix: string
        credentials:
            iamRole:
                roleArn: string
            iamUser:
                awsAccessKeyId: string
                awsSecretAccessKey: string
        format:
            jsonLinesNewlineDelimitedJson:
                compressionCodec: string
                formatType: string
            parquetColumnarStorage:
                compressionCodec: string
                formatType: string
        glueCatalogFloatAsDecimal: false
        lakeformationDatabaseDefaultTagKey: string
        lakeformationDatabaseDefaultTagValues: string
        lakeformationDatabaseName: string
        lakeformationGovernedTables: false
        partitioning: string
        region: string
    definitionId: string
    name: string
    workspaceId: string
DestinationAwsDatalake Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
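For example, both declarations below are equivalent. This is a minimal sketch: the bucket, role ARN, database, and workspace ID are hypothetical placeholders, and other configuration fields are omitted for brevity.

import pulumi_airbyte as airbyte

# Using typed argument classes:
typed = airbyte.DestinationAwsDatalake("typed-example",
    configuration=airbyte.DestinationAwsDatalakeConfigurationArgs(
        bucket_name="my-bucket",
        credentials=airbyte.DestinationAwsDatalakeConfigurationCredentialsArgs(
            iam_role=airbyte.DestinationAwsDatalakeConfigurationCredentialsIamRoleArgs(
                role_arn="arn:aws:iam::111111111111:role/airbyte-writer",
            ),
        ),
        lakeformation_database_name="my_database",
    ),
    workspace_id="3df68150-9956-454d-8144-1645f409cdd1")

# Using dictionary literals with the same snake_case keys:
untyped = airbyte.DestinationAwsDatalake("dict-example",
    configuration={
        "bucket_name": "my-bucket",
        "credentials": {"iam_role": {"role_arn": "arn:aws:iam::111111111111:role/airbyte-writer"}},
        "lakeformation_database_name": "my_database",
    },
    workspace_id="3df68150-9956-454d-8144-1645f409cdd1")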
The DestinationAwsDatalake resource accepts the following input properties:
- Configuration DestinationAwsDatalakeConfiguration
- WorkspaceId string
- DefinitionId string
- The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- Name string
- Name of the destination e.g. dev-mysql-instance.
- Configuration DestinationAwsDatalakeConfigurationArgs
- WorkspaceId string
- DefinitionId string
- The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- Name string
- Name of the destination e.g. dev-mysql-instance.
- configuration DestinationAwsDatalakeConfiguration
- workspaceId String
- definitionId String
- The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- name String
- Name of the destination e.g. dev-mysql-instance.
- configuration DestinationAwsDatalakeConfiguration
- workspaceId string
- definitionId string
- The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- name string
- Name of the destination e.g. dev-mysql-instance.
- configuration DestinationAwsDatalakeConfigurationArgs
- workspace_id str
- definition_id str
- The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- name str
- Name of the destination e.g. dev-mysql-instance.
- configuration Property Map
- workspaceId String
- definitionId String
- The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- name String
- Name of the destination e.g. dev-mysql-instance.
Outputs
All input properties are implicitly available as output properties. Additionally, the DestinationAwsDatalake resource produces the following output properties:
- CreatedAt double
- DestinationId string
- DestinationType string
- Id string
- The provider-assigned unique ID for this managed resource.
- CreatedAt float64
- DestinationId string
- DestinationType string
- Id string
- The provider-assigned unique ID for this managed resource.
- createdAt Double
- destinationId String
- destinationType String
- id String
- The provider-assigned unique ID for this managed resource.
- createdAt number
- destinationId string
- destinationType string
- id string
- The provider-assigned unique ID for this managed resource.
- created_at float
- destination_id str
- destination_type str
- id str
- The provider-assigned unique ID for this managed resource.
- createdAt Number
- destinationId String
- destinationType String
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing DestinationAwsDatalake Resource
Get an existing DestinationAwsDatalake resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: DestinationAwsDatalakeState, opts?: CustomResourceOptions): DestinationAwsDatalake
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        configuration: Optional[DestinationAwsDatalakeConfigurationArgs] = None,
        created_at: Optional[float] = None,
        definition_id: Optional[str] = None,
        destination_id: Optional[str] = None,
        destination_type: Optional[str] = None,
        name: Optional[str] = None,
        workspace_id: Optional[str] = None) -> DestinationAwsDatalake
func GetDestinationAwsDatalake(ctx *Context, name string, id IDInput, state *DestinationAwsDatalakeState, opts ...ResourceOption) (*DestinationAwsDatalake, error)
public static DestinationAwsDatalake Get(string name, Input<string> id, DestinationAwsDatalakeState? state, CustomResourceOptions? opts = null)
public static DestinationAwsDatalake get(String name, Output<String> id, DestinationAwsDatalakeState state, CustomResourceOptions options)
resources:
  _:
    type: airbyte:DestinationAwsDatalake
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Configuration DestinationAwsDatalakeConfiguration
- CreatedAt double
- DefinitionId string
- The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- DestinationId string
- DestinationType string
- Name string
- Name of the destination e.g. dev-mysql-instance.
- WorkspaceId string
- Configuration DestinationAwsDatalakeConfigurationArgs
- CreatedAt float64
- DefinitionId string
- The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- DestinationId string
- DestinationType string
- Name string
- Name of the destination e.g. dev-mysql-instance.
- WorkspaceId string
- configuration DestinationAwsDatalakeConfiguration
- createdAt Double
- definitionId String
- The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- destinationId String
- destinationType String
- name String
- Name of the destination e.g. dev-mysql-instance.
- workspaceId String
- configuration DestinationAwsDatalakeConfiguration
- createdAt number
- definitionId string
- The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- destinationId string
- destinationType string
- name string
- Name of the destination e.g. dev-mysql-instance.
- workspaceId string
- configuration DestinationAwsDatalakeConfigurationArgs
- created_at float
- definition_id str
- The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- destination_id str
- destination_type str
- name str
- Name of the destination e.g. dev-mysql-instance.
- workspace_id str
- configuration Property Map
- createdAt Number
- definitionId String
- The UUID of the connector definition. One of configuration.destinationType or definitionId must be provided. Requires replacement if changed.
- destinationId String
- destinationType String
- name String
- Name of the destination e.g. dev-mysql-instance.
- workspaceId String
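For example, a minimal Python lookup; the UUID passed as id is a hypothetical placeholder for the destination's provider-assigned ID.

import pulumi
import pulumi_airbyte as airbyte

# Look up an existing destination by its provider-assigned ID.
existing = airbyte.DestinationAwsDatalake.get("existing-datalake",
    id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d")

pulumi.export("existing_destination_name", existing.name)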
Supporting Types
DestinationAwsDatalakeConfiguration, DestinationAwsDatalakeConfigurationArgs        
- BucketName string
- The name of the S3 bucket. Read more here.
- Credentials DestinationAwsDatalakeConfigurationCredentials
- Choose how to authenticate to AWS.
- LakeformationDatabaseName string
- The default database in which this destination will create tables, one per stream. Can be changed per connection by customizing the namespace.
- AwsAccountId string
- Target AWS account ID.
- BucketPrefix string
- S3 prefix.
- Format DestinationAwsDatalakeConfigurationFormat
- Format of the data output.
- GlueCatalogFloatAsDecimal bool
- Cast float/double as decimal(38,18). This can help achieve higher accuracy and represent numbers correctly as received from the source. Default: false
- LakeformationDatabaseDefaultTagKey string
- Add a default tag key to databases created by this destination.
- LakeformationDatabaseDefaultTagValues string
- Add default values for the Tag Key to databases created by this destination. Comma-separate multiple values.
- LakeformationGovernedTables bool
- Whether to create tables as LF governed tables. Default: false
- Partitioning string
- Partition data by cursor fields when a cursor field is a date. Default: "NO PARTITIONING"; must be one of ["NO PARTITIONING", "DATE", "YEAR", "MONTH", "DAY", "YEAR/MONTH", "YEAR/MONTH/DAY"]
- Region string
- The region of the S3 bucket. See here for all region codes. Default: ""; must be one of ["", "af-south-1", "ap-east-1", "ap-northeast-1", "ap-northeast-2", "ap-northeast-3", "ap-south-1", "ap-south-2", "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", "ap-southeast-4", "ca-central-1", "ca-west-1", "cn-north-1", "cn-northwest-1", "eu-central-1", "eu-central-2", "eu-north-1", "eu-south-1", "eu-south-2", "eu-west-1", "eu-west-2", "eu-west-3", "il-central-1", "me-central-1", "me-south-1", "sa-east-1", "us-east-1", "us-east-2", "us-gov-east-1", "us-gov-west-1", "us-west-1", "us-west-2"]
- BucketName string
- The name of the S3 bucket. Read more here.
- Credentials DestinationAwsDatalakeConfigurationCredentials
- Choose how to authenticate to AWS.
- LakeformationDatabaseName string
- The default database in which this destination will create tables, one per stream. Can be changed per connection by customizing the namespace.
- AwsAccountId string
- Target AWS account ID.
- BucketPrefix string
- S3 prefix.
- Format DestinationAwsDatalakeConfigurationFormat
- Format of the data output.
- GlueCatalogFloatAsDecimal bool
- Cast float/double as decimal(38,18). This can help achieve higher accuracy and represent numbers correctly as received from the source. Default: false
- LakeformationDatabaseDefaultTagKey string
- Add a default tag key to databases created by this destination.
- LakeformationDatabaseDefaultTagValues string
- Add default values for the Tag Key to databases created by this destination. Comma-separate multiple values.
- LakeformationGovernedTables bool
- Whether to create tables as LF governed tables. Default: false
- Partitioning string
- Partition data by cursor fields when a cursor field is a date. Default: "NO PARTITIONING"; must be one of ["NO PARTITIONING", "DATE", "YEAR", "MONTH", "DAY", "YEAR/MONTH", "YEAR/MONTH/DAY"]
- Region string
- The region of the S3 bucket. See here for all region codes. Default: ""; must be one of ["", "af-south-1", "ap-east-1", "ap-northeast-1", "ap-northeast-2", "ap-northeast-3", "ap-south-1", "ap-south-2", "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", "ap-southeast-4", "ca-central-1", "ca-west-1", "cn-north-1", "cn-northwest-1", "eu-central-1", "eu-central-2", "eu-north-1", "eu-south-1", "eu-south-2", "eu-west-1", "eu-west-2", "eu-west-3", "il-central-1", "me-central-1", "me-south-1", "sa-east-1", "us-east-1", "us-east-2", "us-gov-east-1", "us-gov-west-1", "us-west-1", "us-west-2"]
- bucketName String
- The name of the S3 bucket. Read more here.
- credentials DestinationAwsDatalakeConfigurationCredentials
- Choose how to authenticate to AWS.
- lakeformationDatabaseName String
- The default database in which this destination will create tables, one per stream. Can be changed per connection by customizing the namespace.
- awsAccountId String
- Target AWS account ID.
- bucketPrefix String
- S3 prefix.
- format DestinationAwsDatalakeConfigurationFormat
- Format of the data output.
- glueCatalogFloatAsDecimal Boolean
- Cast float/double as decimal(38,18). This can help achieve higher accuracy and represent numbers correctly as received from the source. Default: false
- lakeformationDatabaseDefaultTagKey String
- Add a default tag key to databases created by this destination.
- lakeformationDatabaseDefaultTagValues String
- Add default values for the Tag Key to databases created by this destination. Comma-separate multiple values.
- lakeformationGovernedTables Boolean
- Whether to create tables as LF governed tables. Default: false
- partitioning String
- Partition data by cursor fields when a cursor field is a date. Default: "NO PARTITIONING"; must be one of ["NO PARTITIONING", "DATE", "YEAR", "MONTH", "DAY", "YEAR/MONTH", "YEAR/MONTH/DAY"]
- region String
- The region of the S3 bucket. See here for all region codes. Default: ""; must be one of ["", "af-south-1", "ap-east-1", "ap-northeast-1", "ap-northeast-2", "ap-northeast-3", "ap-south-1", "ap-south-2", "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", "ap-southeast-4", "ca-central-1", "ca-west-1", "cn-north-1", "cn-northwest-1", "eu-central-1", "eu-central-2", "eu-north-1", "eu-south-1", "eu-south-2", "eu-west-1", "eu-west-2", "eu-west-3", "il-central-1", "me-central-1", "me-south-1", "sa-east-1", "us-east-1", "us-east-2", "us-gov-east-1", "us-gov-west-1", "us-west-1", "us-west-2"]
- bucketName string
- The name of the S3 bucket. Read more here.
- credentials DestinationAwsDatalakeConfigurationCredentials
- Choose how to authenticate to AWS.
- lakeformationDatabaseName string
- The default database in which this destination will create tables, one per stream. Can be changed per connection by customizing the namespace.
- awsAccountId string
- Target AWS account ID.
- bucketPrefix string
- S3 prefix.
- format DestinationAwsDatalakeConfigurationFormat
- Format of the data output.
- glueCatalogFloatAsDecimal boolean
- Cast float/double as decimal(38,18). This can help achieve higher accuracy and represent numbers correctly as received from the source. Default: false
- lakeformationDatabaseDefaultTagKey string
- Add a default tag key to databases created by this destination.
- lakeformationDatabaseDefaultTagValues string
- Add default values for the Tag Key to databases created by this destination. Comma-separate multiple values.
- lakeformationGovernedTables boolean
- Whether to create tables as LF governed tables. Default: false
- partitioning string
- Partition data by cursor fields when a cursor field is a date. Default: "NO PARTITIONING"; must be one of ["NO PARTITIONING", "DATE", "YEAR", "MONTH", "DAY", "YEAR/MONTH", "YEAR/MONTH/DAY"]
- region string
- The region of the S3 bucket. See here for all region codes. Default: ""; must be one of ["", "af-south-1", "ap-east-1", "ap-northeast-1", "ap-northeast-2", "ap-northeast-3", "ap-south-1", "ap-south-2", "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", "ap-southeast-4", "ca-central-1", "ca-west-1", "cn-north-1", "cn-northwest-1", "eu-central-1", "eu-central-2", "eu-north-1", "eu-south-1", "eu-south-2", "eu-west-1", "eu-west-2", "eu-west-3", "il-central-1", "me-central-1", "me-south-1", "sa-east-1", "us-east-1", "us-east-2", "us-gov-east-1", "us-gov-west-1", "us-west-1", "us-west-2"]
- bucket_name str
- The name of the S3 bucket. Read more here.
- credentials DestinationAwsDatalakeConfigurationCredentials
- Choose how to authenticate to AWS.
- lakeformation_database_name str
- The default database in which this destination will create tables, one per stream. Can be changed per connection by customizing the namespace.
- aws_account_id str
- Target AWS account ID.
- bucket_prefix str
- S3 prefix.
- format DestinationAwsDatalakeConfigurationFormat
- Format of the data output.
- glue_catalog_float_as_decimal bool
- Cast float/double as decimal(38,18). This can help achieve higher accuracy and represent numbers correctly as received from the source. Default: false
- lakeformation_database_default_tag_key str
- Add a default tag key to databases created by this destination.
- lakeformation_database_default_tag_values str
- Add default values for the Tag Key to databases created by this destination. Comma-separate multiple values.
- lakeformation_governed_tables bool
- Whether to create tables as LF governed tables. Default: false
- partitioning str
- Partition data by cursor fields when a cursor field is a date. Default: "NO PARTITIONING"; must be one of ["NO PARTITIONING", "DATE", "YEAR", "MONTH", "DAY", "YEAR/MONTH", "YEAR/MONTH/DAY"]
- region str
- The region of the S3 bucket. See here for all region codes. Default: ""; must be one of ["", "af-south-1", "ap-east-1", "ap-northeast-1", "ap-northeast-2", "ap-northeast-3", "ap-south-1", "ap-south-2", "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", "ap-southeast-4", "ca-central-1", "ca-west-1", "cn-north-1", "cn-northwest-1", "eu-central-1", "eu-central-2", "eu-north-1", "eu-south-1", "eu-south-2", "eu-west-1", "eu-west-2", "eu-west-3", "il-central-1", "me-central-1", "me-south-1", "sa-east-1", "us-east-1", "us-east-2", "us-gov-east-1", "us-gov-west-1", "us-west-1", "us-west-2"]
- bucketName String
- The name of the S3 bucket. Read more here.
- credentials Property Map
- Choose how to authenticate to AWS.
- lakeformationDatabaseName String
- The default database in which this destination will create tables, one per stream. Can be changed per connection by customizing the namespace.
- awsAccountId String
- Target AWS account ID.
- bucketPrefix String
- S3 prefix.
- format Property Map
- Format of the data output.
- glueCatalogFloatAsDecimal Boolean
- Cast float/double as decimal(38,18). This can help achieve higher accuracy and represent numbers correctly as received from the source. Default: false
- lakeformationDatabaseDefaultTagKey String
- Add a default tag key to databases created by this destination.
- lakeformationDatabaseDefaultTagValues String
- Add default values for the Tag Key to databases created by this destination. Comma-separate multiple values.
- lakeformationGovernedTables Boolean
- Whether to create tables as LF governed tables. Default: false
- partitioning String
- Partition data by cursor fields when a cursor field is a date. Default: "NO PARTITIONING"; must be one of ["NO PARTITIONING", "DATE", "YEAR", "MONTH", "DAY", "YEAR/MONTH", "YEAR/MONTH/DAY"]
- region String
- The region of the S3 bucket. See here for all region codes. Default: ""; must be one of ["", "af-south-1", "ap-east-1", "ap-northeast-1", "ap-northeast-2", "ap-northeast-3", "ap-south-1", "ap-south-2", "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", "ap-southeast-4", "ca-central-1", "ca-west-1", "cn-north-1", "cn-northwest-1", "eu-central-1", "eu-central-2", "eu-north-1", "eu-south-1", "eu-south-2", "eu-west-1", "eu-west-2", "eu-west-3", "il-central-1", "me-central-1", "me-south-1", "sa-east-1", "us-east-1", "us-east-2", "us-gov-east-1", "us-gov-west-1", "us-west-1", "us-west-2"]
DestinationAwsDatalakeConfigurationCredentials, DestinationAwsDatalakeConfigurationCredentialsArgs          
DestinationAwsDatalakeConfigurationCredentialsIamRole, DestinationAwsDatalakeConfigurationCredentialsIamRoleArgs              
- RoleArn string
- Will assume this role to write data to S3.
- RoleArn string
- Will assume this role to write data to S3.
- roleArn String
- Will assume this role to write data to S3.
- roleArn string
- Will assume this role to write data to S3.
- role_arn str
- Will assume this role to write data to S3.
- roleArn String
- Will assume this role to write data to S3.
DestinationAwsDatalakeConfigurationCredentialsIamUser, DestinationAwsDatalakeConfigurationCredentialsIamUserArgs              
- AwsAccessKeyId string
- AWS User Access Key Id
- AwsSecretAccessKey string
- Secret Access Key
- AwsAccessKeyId string
- AWS User Access Key Id
- AwsSecretAccessKey string
- Secret Access Key
- awsAccessKeyId String
- AWS User Access Key Id
- awsSecretAccessKey String
- Secret Access Key
- awsAccessKeyId string
- AWS User Access Key Id
- awsSecretAccessKey string
- Secret Access Key
- aws_access_key_id str
- AWS User Access Key Id
- aws_secret_access_key str
- Secret Access Key
- awsAccessKeyId String
- AWS User Access Key Id
- awsSecretAccessKey String
- Secret Access Key
DestinationAwsDatalakeConfigurationFormat, DestinationAwsDatalakeConfigurationFormatArgs          
DestinationAwsDatalakeConfigurationFormatJsonLinesNewlineDelimitedJson, DestinationAwsDatalakeConfigurationFormatJsonLinesNewlineDelimitedJsonArgs                    
- CompressionCodec string
- The compression algorithm used to compress data. Default: "UNCOMPRESSED"; must be one of ["UNCOMPRESSED", "GZIP"]
- FormatType string
- Default: "JSONL"; must be "JSONL"
- CompressionCodec string
- The compression algorithm used to compress data. Default: "UNCOMPRESSED"; must be one of ["UNCOMPRESSED", "GZIP"]
- FormatType string
- Default: "JSONL"; must be "JSONL"
- compressionCodec String
- The compression algorithm used to compress data. Default: "UNCOMPRESSED"; must be one of ["UNCOMPRESSED", "GZIP"]
- formatType String
- Default: "JSONL"; must be "JSONL"
- compressionCodec string
- The compression algorithm used to compress data. Default: "UNCOMPRESSED"; must be one of ["UNCOMPRESSED", "GZIP"]
- formatType string
- Default: "JSONL"; must be "JSONL"
- compression_codec str
- The compression algorithm used to compress data. Default: "UNCOMPRESSED"; must be one of ["UNCOMPRESSED", "GZIP"]
- format_type str
- Default: "JSONL"; must be "JSONL"
- compressionCodec String
- The compression algorithm used to compress data. Default: "UNCOMPRESSED"; must be one of ["UNCOMPRESSED", "GZIP"]
- formatType String
- Default: "JSONL"; must be "JSONL"
DestinationAwsDatalakeConfigurationFormatParquetColumnarStorage, DestinationAwsDatalakeConfigurationFormatParquetColumnarStorageArgs                
- CompressionCodec string
- The compression algorithm used to compress data. Default: "SNAPPY"; must be one of ["UNCOMPRESSED", "SNAPPY", "GZIP", "ZSTD"]
- FormatType string
- Default: "Parquet"; must be "Parquet"
- CompressionCodec string
- The compression algorithm used to compress data. Default: "SNAPPY"; must be one of ["UNCOMPRESSED", "SNAPPY", "GZIP", "ZSTD"]
- FormatType string
- Default: "Parquet"; must be "Parquet"
- compressionCodec String
- The compression algorithm used to compress data. Default: "SNAPPY"; must be one of ["UNCOMPRESSED", "SNAPPY", "GZIP", "ZSTD"]
- formatType String
- Default: "Parquet"; must be "Parquet"
- compressionCodec string
- The compression algorithm used to compress data. Default: "SNAPPY"; must be one of ["UNCOMPRESSED", "SNAPPY", "GZIP", "ZSTD"]
- formatType string
- Default: "Parquet"; must be "Parquet"
- compression_codec str
- The compression algorithm used to compress data. Default: "SNAPPY"; must be one of ["UNCOMPRESSED", "SNAPPY", "GZIP", "ZSTD"]
- format_type str
- Default: "Parquet"; must be "Parquet"
- compressionCodec String
- The compression algorithm used to compress data. Default: "SNAPPY"; must be one of ["UNCOMPRESSED", "SNAPPY", "GZIP", "ZSTD"]
- formatType String
- Default: "Parquet"; must be "Parquet"
Import
$ pulumi import airbyte:index/destinationAwsDatalake:DestinationAwsDatalake my_airbyte_destination_aws_datalake ""
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- airbyte airbytehq/terraform-provider-airbyte
- License
- Notes
- This Pulumi package is based on the airbyte Terraform Provider.