MongoDB Atlas v3.30.0 published on Friday, Mar 21, 2025 by Pulumi
mongodbatlas.getDataLakePipeline
Explore with Pulumi AI
WARNING: Data Lake is deprecated. To learn more, see https://dochub.mongodb.org/core/data-lake-deprecation
mongodbatlas.DataLakePipeline describes a Data Lake Pipeline.
NOTE: Groups and projects are synonymous terms. You may find
group_id in the official documentation.
Example Usage
S
Coming soon!
Coming soon!
Coming soon!
Coming soon!
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.mongodbatlas.AdvancedCluster;
import com.pulumi.mongodbatlas.AdvancedClusterArgs;
import com.pulumi.mongodbatlas.DataLakePipeline;
import com.pulumi.mongodbatlas.DataLakePipelineArgs;
import com.pulumi.mongodbatlas.MongodbatlasFunctions;
import com.pulumi.mongodbatlas.Project;
import com.pulumi.mongodbatlas.ProjectArgs;
import com.pulumi.mongodbatlas.inputs.AdvancedClusterReplicationSpecArgs;
import com.pulumi.mongodbatlas.inputs.AdvancedClusterReplicationSpecRegionConfigArgs;
import com.pulumi.mongodbatlas.inputs.AdvancedClusterReplicationSpecRegionConfigElectableSpecsArgs;
import com.pulumi.mongodbatlas.inputs.DataLakePipelineSinkArgs;
import com.pulumi.mongodbatlas.inputs.DataLakePipelineSinkPartitionFieldArgs;
import com.pulumi.mongodbatlas.inputs.DataLakePipelineSourceArgs;
import com.pulumi.mongodbatlas.inputs.DataLakePipelineTransformationArgs;
import com.pulumi.mongodbatlas.inputs.GetDataLakePipelineArgs;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * Example program: creates an Atlas project, a backup-enabled replica-set
 * cluster, and a Data Lake Pipeline sourced from that cluster's cloud backup
 * snapshots, then reads the pipeline back via the getDataLakePipeline data
 * source.
 *
 * NOTE(review): Data Lake is deprecated upstream; this example documents the
 * data source as it existed at provider v3.30.0.
 */
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Project that owns every other resource in this example.
        var projectTest = new Project("projectTest", ProjectArgs.builder()
            .name("NAME OF THE PROJECT")
            .orgId("ORGANIZATION ID")
            .build());

        // Replica set with cloud backup enabled; its snapshots feed the
        // pipeline's ON_DEMAND_CPS source below.
        var automatedBackupTest = new AdvancedCluster("automatedBackupTest", AdvancedClusterArgs.builder()
            // FIX: original referenced an undefined local `projectId`;
            // use the ID of the project created above instead.
            .projectId(projectTest.id())
            .name("automated-backup-test")
            .clusterType("REPLICASET")
            .backupEnabled(true) // enable cloud backup snapshots
            .replicationSpecs(AdvancedClusterReplicationSpecArgs.builder()
                .regionConfigs(AdvancedClusterReplicationSpecRegionConfigArgs.builder()
                    .priority(7)
                    .providerName("GCP")
                    .regionName("US_EAST_4")
                    .electableSpecs(AdvancedClusterReplicationSpecRegionConfigElectableSpecsArgs.builder()
                        .instanceSize("M10")
                        .nodeCount(3)
                        .build())
                    .build())
                .build())
            .build());

        var pipeline = new DataLakePipeline("pipeline", DataLakePipelineArgs.builder()
            // FIX: Project exposes its identifier as `id()`, not `projectId()`.
            .projectId(projectTest.id())
            .name("DataLakePipelineName")
            // Destination: Atlas Data Lake Storage, partitioned by `access`.
            .sink(DataLakePipelineSinkArgs.builder()
                .type("DLS")
                .partitionFields(DataLakePipelineSinkPartitionFieldArgs.builder()
                    .name("access")
                    .order(0)
                    .build())
                .build())
            // Source: on-demand cloud provider snapshots of the cluster above.
            .source(DataLakePipelineSourceArgs.builder()
                .type("ON_DEMAND_CPS")
                .clusterName(automatedBackupTest.name())
                .databaseName("sample_airbnb")
                .collectionName("listingsAndReviews")
                .build())
            // Fields excluded from the exported namespace.
            .transformations(
                DataLakePipelineTransformationArgs.builder()
                    .field("test")
                    .type("EXCLUDE")
                    .build(),
                DataLakePipelineTransformationArgs.builder()
                    .field("test22")
                    .type("EXCLUDE")
                    .build())
            .build());

        // Look the pipeline back up through the data source.
        final var pipelineDataSource = MongodbatlasFunctions.getDataLakePipeline(GetDataLakePipelineArgs.builder()
            .projectId(pipeline.projectId())
            .name(pipeline.name())
            .build());
    }
}
resources:
  # Project that owns every other resource in this example.
  projectTest:
    type: mongodbatlas:Project
    properties:
      name: NAME OF THE PROJECT
      orgId: ORGANIZATION ID
  # Replica set with cloud backup enabled; its snapshots feed the
  # pipeline's ON_DEMAND_CPS source below.
  automatedBackupTest:
    type: mongodbatlas:AdvancedCluster
    name: automated_backup_test
    properties:
      # FIX: original referenced an undeclared ${projectId} variable;
      # use the ID of the project defined above instead.
      projectId: ${projectTest.id}
      name: automated-backup-test
      clusterType: REPLICASET
      backupEnabled: true # enable cloud backup snapshots
      replicationSpecs:
        - regionConfigs:
            - priority: 7
              providerName: GCP
              regionName: US_EAST_4
              electableSpecs:
                instanceSize: M10
                nodeCount: 3
  pipeline:
    type: mongodbatlas:DataLakePipeline
    properties:
      # FIX: mongodbatlas:Project exposes its identifier as `id`,
      # not `projectId`.
      projectId: ${projectTest.id}
      name: DataLakePipelineName
      # Destination: Atlas Data Lake Storage, partitioned by `access`.
      sink:
        type: DLS
        partitionFields:
          - name: access
            order: 0
      # Source: on-demand cloud provider snapshots of the cluster above.
      source:
        type: ON_DEMAND_CPS
        clusterName: ${automatedBackupTest.name}
        databaseName: sample_airbnb
        collectionName: listingsAndReviews
      # Fields excluded from the exported namespace.
      transformations:
        - field: test
          type: EXCLUDE
        - field: test22
          type: EXCLUDE
variables:
  # Look the pipeline back up through the data source.
  pipelineDataSource:
    fn::invoke:
      function: mongodbatlas:getDataLakePipeline
      arguments:
        projectId: ${pipeline.projectId}
        name: ${pipeline.name}
Using getDataLakePipeline
Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.
function getDataLakePipeline(args: GetDataLakePipelineArgs, opts?: InvokeOptions): Promise<GetDataLakePipelineResult>
function getDataLakePipelineOutput(args: GetDataLakePipelineOutputArgs, opts?: InvokeOptions): Output<GetDataLakePipelineResult>def get_data_lake_pipeline(name: Optional[str] = None,
                           project_id: Optional[str] = None,
                           opts: Optional[InvokeOptions] = None) -> GetDataLakePipelineResult
def get_data_lake_pipeline_output(name: Optional[pulumi.Input[str]] = None,
                           project_id: Optional[pulumi.Input[str]] = None,
                           opts: Optional[InvokeOptions] = None) -> Output[GetDataLakePipelineResult]func LookupDataLakePipeline(ctx *Context, args *LookupDataLakePipelineArgs, opts ...InvokeOption) (*LookupDataLakePipelineResult, error)
func LookupDataLakePipelineOutput(ctx *Context, args *LookupDataLakePipelineOutputArgs, opts ...InvokeOption) LookupDataLakePipelineResultOutput> Note: This function is named LookupDataLakePipeline in the Go SDK.
public static class GetDataLakePipeline 
{
    public static Task<GetDataLakePipelineResult> InvokeAsync(GetDataLakePipelineArgs args, InvokeOptions? opts = null)
    public static Output<GetDataLakePipelineResult> Invoke(GetDataLakePipelineInvokeArgs args, InvokeOptions? opts = null)
}public static CompletableFuture<GetDataLakePipelineResult> getDataLakePipeline(GetDataLakePipelineArgs args, InvokeOptions options)
public static Output<GetDataLakePipelineResult> getDataLakePipeline(GetDataLakePipelineArgs args, InvokeOptions options)
fn::invoke:
  function: mongodbatlas:index/getDataLakePipeline:getDataLakePipeline
  arguments:
    # arguments dictionaryThe following arguments are supported:
- name str
- Name of the Atlas Data Lake Pipeline.
- project_id str
- The unique ID for the project to create a Data Lake Pipeline.
getDataLakePipeline Result
The following output properties are available:
- CreatedDate string
- Timestamp that indicates when the Data Lake Pipeline was created.
- Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- IngestionSchedules List<GetData Lake Pipeline Ingestion Schedule> 
- List of backup schedule policy items that you can use as a Data Lake Pipeline source.- ingestion_schedules.#.id- Unique 24-hexadecimal digit string that identifies this backup policy item.
- ingestion_schedules.#.frequency_type- Human-readable label that identifies the frequency type associated with the backup policy.
- ingestion_schedules.#.frequency_interval- Number that indicates the frequency interval for a set of snapshots.
- ingestion_schedules.#.retention_unit- Unit of time in which MongoDB Atlas measures snapshot retention.
- ingestion_schedules.#.retention_value- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
 
- LastUpdatedDate string
- Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- Name string
- ProjectId string
- Unique 24-hexadecimal character string that identifies the project.- policyItemId- Unique 24-hexadecimal character string that identifies a policy item.
 
- Sinks
List<GetData Lake Pipeline Sink> 
- Snapshots
List<GetData Lake Pipeline Snapshot> 
- List of backup snapshots that you can use to trigger an on demand pipeline run.- snapshots.#.id- Unique 24-hexadecimal digit string that identifies the snapshot.
- snapshots.#.provider- Human-readable label that identifies the cloud provider that stores this snapshot.
- snapshots.#.created_at- Date and time when MongoDB Atlas took the snapshot.
- snapshots.#.expires_at- Date and time when MongoDB Atlas deletes the snapshot.
- snapshots.#.frequency_type- Human-readable label that identifies how often this snapshot triggers.
- snapshots.#.master_key- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.
- snapshots.#.mongod_version- Version of the MongoDB host that this snapshot backs up.
- snapshots.#.replica_set_name- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.
- snapshots.#.type- Human-readable label that categorizes the cluster as a replica set or sharded cluster.
- snapshots.#.snapshot_type- Human-readable label that identifies when this snapshot triggers.
- snapshots.#.status- Human-readable label that indicates the stage of the backup process for this snapshot.
- snapshots.#.size- List of backup snapshots that you can use to trigger an on demand pipeline run.
- snapshots.#.copy_region- List that identifies the regions to which MongoDB Atlas copies the snapshot.
- snapshots.#.policies- List that contains unique identifiers for the policy items.
 
- Sources
List<GetData Lake Pipeline Source> 
- State string
- State of this Data Lake Pipeline.
- Transformations
List<GetData Lake Pipeline Transformation> 
- Fields to be excluded for this Data Lake Pipeline.- transformations.#.field- Key in the document.
- transformations.#.type- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
 
- CreatedDate string
- Timestamp that indicates when the Data Lake Pipeline was created.
- Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- IngestionSchedules []GetData Lake Pipeline Ingestion Schedule 
- List of backup schedule policy items that you can use as a Data Lake Pipeline source.- ingestion_schedules.#.id- Unique 24-hexadecimal digit string that identifies this backup policy item.
- ingestion_schedules.#.frequency_type- Human-readable label that identifies the frequency type associated with the backup policy.
- ingestion_schedules.#.frequency_interval- Number that indicates the frequency interval for a set of snapshots.
- ingestion_schedules.#.retention_unit- Unit of time in which MongoDB Atlas measures snapshot retention.
- ingestion_schedules.#.retention_value- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
 
- LastUpdatedDate string
- Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- Name string
- ProjectId string
- Unique 24-hexadecimal character string that identifies the project.- policyItemId- Unique 24-hexadecimal character string that identifies a policy item.
 
- Sinks
[]GetData Lake Pipeline Sink 
- Snapshots
[]GetData Lake Pipeline Snapshot 
- List of backup snapshots that you can use to trigger an on demand pipeline run.- snapshots.#.id- Unique 24-hexadecimal digit string that identifies the snapshot.
- snapshots.#.provider- Human-readable label that identifies the cloud provider that stores this snapshot.
- snapshots.#.created_at- Date and time when MongoDB Atlas took the snapshot.
- snapshots.#.expires_at- Date and time when MongoDB Atlas deletes the snapshot.
- snapshots.#.frequency_type- Human-readable label that identifies how often this snapshot triggers.
- snapshots.#.master_key- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.
- snapshots.#.mongod_version- Version of the MongoDB host that this snapshot backs up.
- snapshots.#.replica_set_name- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.
- snapshots.#.type- Human-readable label that categorizes the cluster as a replica set or sharded cluster.
- snapshots.#.snapshot_type- Human-readable label that identifies when this snapshot triggers.
- snapshots.#.status- Human-readable label that indicates the stage of the backup process for this snapshot.
- snapshots.#.size- List of backup snapshots that you can use to trigger an on demand pipeline run.
- snapshots.#.copy_region- List that identifies the regions to which MongoDB Atlas copies the snapshot.
- snapshots.#.policies- List that contains unique identifiers for the policy items.
 
- Sources
[]GetData Lake Pipeline Source 
- State string
- State of this Data Lake Pipeline.
- Transformations
[]GetData Lake Pipeline Transformation 
- Fields to be excluded for this Data Lake Pipeline.- transformations.#.field- Key in the document.
- transformations.#.type- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
 
- createdDate String
- Timestamp that indicates when the Data Lake Pipeline was created.
- id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- ingestionSchedules List<GetData Lake Pipeline Ingestion Schedule> 
- List of backup schedule policy items that you can use as a Data Lake Pipeline source.- ingestion_schedules.#.id- Unique 24-hexadecimal digit string that identifies this backup policy item.
- ingestion_schedules.#.frequency_type- Human-readable label that identifies the frequency type associated with the backup policy.
- ingestion_schedules.#.frequency_interval- Number that indicates the frequency interval for a set of snapshots.
- ingestion_schedules.#.retention_unit- Unit of time in which MongoDB Atlas measures snapshot retention.
- ingestion_schedules.#.retention_value- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
 
- lastUpdatedDate String
- Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- name String
- projectId String
- Unique 24-hexadecimal character string that identifies the project.- policyItemId- Unique 24-hexadecimal character string that identifies a policy item.
 
- sinks
List<GetData Lake Pipeline Sink> 
- snapshots
List<GetData Lake Pipeline Snapshot> 
- List of backup snapshots that you can use to trigger an on demand pipeline run.- snapshots.#.id- Unique 24-hexadecimal digit string that identifies the snapshot.
- snapshots.#.provider- Human-readable label that identifies the cloud provider that stores this snapshot.
- snapshots.#.created_at- Date and time when MongoDB Atlas took the snapshot.
- snapshots.#.expires_at- Date and time when MongoDB Atlas deletes the snapshot.
- snapshots.#.frequency_type- Human-readable label that identifies how often this snapshot triggers.
- snapshots.#.master_key- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.
- snapshots.#.mongod_version- Version of the MongoDB host that this snapshot backs up.
- snapshots.#.replica_set_name- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.
- snapshots.#.type- Human-readable label that categorizes the cluster as a replica set or sharded cluster.
- snapshots.#.snapshot_type- Human-readable label that identifies when this snapshot triggers.
- snapshots.#.status- Human-readable label that indicates the stage of the backup process for this snapshot.
- snapshots.#.size- List of backup snapshots that you can use to trigger an on demand pipeline run.
- snapshots.#.copy_region- List that identifies the regions to which MongoDB Atlas copies the snapshot.
- snapshots.#.policies- List that contains unique identifiers for the policy items.
 
- sources
List<GetData Lake Pipeline Source> 
- state String
- State of this Data Lake Pipeline.
- transformations
List<GetData Lake Pipeline Transformation> 
- Fields to be excluded for this Data Lake Pipeline.- transformations.#.field- Key in the document.
- transformations.#.type- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
 
- createdDate string
- Timestamp that indicates when the Data Lake Pipeline was created.
- id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- ingestionSchedules GetData Lake Pipeline Ingestion Schedule[] 
- List of backup schedule policy items that you can use as a Data Lake Pipeline source.- ingestion_schedules.#.id- Unique 24-hexadecimal digit string that identifies this backup policy item.
- ingestion_schedules.#.frequency_type- Human-readable label that identifies the frequency type associated with the backup policy.
- ingestion_schedules.#.frequency_interval- Number that indicates the frequency interval for a set of snapshots.
- ingestion_schedules.#.retention_unit- Unit of time in which MongoDB Atlas measures snapshot retention.
- ingestion_schedules.#.retention_value- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
 
- lastUpdatedDate string
- Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- name string
- projectId string
- Unique 24-hexadecimal character string that identifies the project.- policyItemId- Unique 24-hexadecimal character string that identifies a policy item.
 
- sinks
GetData Lake Pipeline Sink[] 
- snapshots
GetData Lake Pipeline Snapshot[] 
- List of backup snapshots that you can use to trigger an on demand pipeline run.- snapshots.#.id- Unique 24-hexadecimal digit string that identifies the snapshot.
- snapshots.#.provider- Human-readable label that identifies the cloud provider that stores this snapshot.
- snapshots.#.created_at- Date and time when MongoDB Atlas took the snapshot.
- snapshots.#.expires_at- Date and time when MongoDB Atlas deletes the snapshot.
- snapshots.#.frequency_type- Human-readable label that identifies how often this snapshot triggers.
- snapshots.#.master_key- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.
- snapshots.#.mongod_version- Version of the MongoDB host that this snapshot backs up.
- snapshots.#.replica_set_name- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.
- snapshots.#.type- Human-readable label that categorizes the cluster as a replica set or sharded cluster.
- snapshots.#.snapshot_type- Human-readable label that identifies when this snapshot triggers.
- snapshots.#.status- Human-readable label that indicates the stage of the backup process for this snapshot.
- snapshots.#.size- List of backup snapshots that you can use to trigger an on demand pipeline run.
- snapshots.#.copy_region- List that identifies the regions to which MongoDB Atlas copies the snapshot.
- snapshots.#.policies- List that contains unique identifiers for the policy items.
 
- sources
GetData Lake Pipeline Source[] 
- state string
- State of this Data Lake Pipeline.
- transformations
GetData Lake Pipeline Transformation[] 
- Fields to be excluded for this Data Lake Pipeline.- transformations.#.field- Key in the document.
- transformations.#.type- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
 
- created_date str
- Timestamp that indicates when the Data Lake Pipeline was created.
- id str
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- ingestion_schedules Sequence[GetData Lake Pipeline Ingestion Schedule] 
- List of backup schedule policy items that you can use as a Data Lake Pipeline source.- ingestion_schedules.#.id- Unique 24-hexadecimal digit string that identifies this backup policy item.
- ingestion_schedules.#.frequency_type- Human-readable label that identifies the frequency type associated with the backup policy.
- ingestion_schedules.#.frequency_interval- Number that indicates the frequency interval for a set of snapshots.
- ingestion_schedules.#.retention_unit- Unit of time in which MongoDB Atlas measures snapshot retention.
- ingestion_schedules.#.retention_value- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
 
- last_updated_date str
- Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- name str
- project_id str
- Unique 24-hexadecimal character string that identifies the project.- policyItemId- Unique 24-hexadecimal character string that identifies a policy item.
 
- sinks
Sequence[GetData Lake Pipeline Sink] 
- snapshots
Sequence[GetData Lake Pipeline Snapshot] 
- List of backup snapshots that you can use to trigger an on demand pipeline run.- snapshots.#.id- Unique 24-hexadecimal digit string that identifies the snapshot.
- snapshots.#.provider- Human-readable label that identifies the cloud provider that stores this snapshot.
- snapshots.#.created_at- Date and time when MongoDB Atlas took the snapshot.
- snapshots.#.expires_at- Date and time when MongoDB Atlas deletes the snapshot.
- snapshots.#.frequency_type- Human-readable label that identifies how often this snapshot triggers.
- snapshots.#.master_key- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.
- snapshots.#.mongod_version- Version of the MongoDB host that this snapshot backs up.
- snapshots.#.replica_set_name- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.
- snapshots.#.type- Human-readable label that categorizes the cluster as a replica set or sharded cluster.
- snapshots.#.snapshot_type- Human-readable label that identifies when this snapshot triggers.
- snapshots.#.status- Human-readable label that indicates the stage of the backup process for this snapshot.
- snapshots.#.size- List of backup snapshots that you can use to trigger an on demand pipeline run.
- snapshots.#.copy_region- List that identifies the regions to which MongoDB Atlas copies the snapshot.
- snapshots.#.policies- List that contains unique identifiers for the policy items.
 
- sources
Sequence[GetData Lake Pipeline Source] 
- state str
- State of this Data Lake Pipeline.
- transformations
Sequence[GetData Lake Pipeline Transformation] 
- Fields to be excluded for this Data Lake Pipeline.- transformations.#.field- Key in the document.
- transformations.#.type- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
 
- createdDate String
- Timestamp that indicates when the Data Lake Pipeline was created.
- id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- ingestionSchedules List<Property Map>
- List of backup schedule policy items that you can use as a Data Lake Pipeline source.- ingestion_schedules.#.id- Unique 24-hexadecimal digit string that identifies this backup policy item.
- ingestion_schedules.#.frequency_type- Human-readable label that identifies the frequency type associated with the backup policy.
- ingestion_schedules.#.frequency_interval- Number that indicates the frequency interval for a set of snapshots.
- ingestion_schedules.#.retention_unit- Unit of time in which MongoDB Atlas measures snapshot retention.
- ingestion_schedules.#.retention_value- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
 
- lastUpdatedDate String
- Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- name String
- projectId String
- Unique 24-hexadecimal character string that identifies the project.- policyItemId- Unique 24-hexadecimal character string that identifies a policy item.
 
- sinks List<Property Map>
- snapshots List<Property Map>
- List of backup snapshots that you can use to trigger an on demand pipeline run.- snapshots.#.id- Unique 24-hexadecimal digit string that identifies the snapshot.
- snapshots.#.provider- Human-readable label that identifies the cloud provider that stores this snapshot.
- snapshots.#.created_at- Date and time when MongoDB Atlas took the snapshot.
- snapshots.#.expires_at- Date and time when MongoDB Atlas deletes the snapshot.
- snapshots.#.frequency_type- Human-readable label that identifies how often this snapshot triggers.
- snapshots.#.master_key- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.
- snapshots.#.mongod_version- Version of the MongoDB host that this snapshot backs up.
- snapshots.#.replica_set_name- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.
- snapshots.#.type- Human-readable label that categorizes the cluster as a replica set or sharded cluster.
- snapshots.#.snapshot_type- Human-readable label that identifies when this snapshot triggers.
- snapshots.#.status- Human-readable label that indicates the stage of the backup process for this snapshot.
- snapshots.#.size- List of backup snapshots that you can use to trigger an on demand pipeline run.
- snapshots.#.copy_region- List that identifies the regions to which MongoDB Atlas copies the snapshot.
- snapshots.#.policies- List that contains unique identifiers for the policy items.
 
- sources List<Property Map>
- state String
- State of this Data Lake Pipeline.
- transformations List<Property Map>
- Fields to be excluded for this Data Lake Pipeline.- transformations.#.field- Key in the document.
- transformations.#.type- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
 
Supporting Types
GetDataLakePipelineIngestionSchedule     
- FrequencyInterval int
- FrequencyType string
- Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- RetentionUnit string
- RetentionValue int
- FrequencyInterval int
- FrequencyType string
- Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- RetentionUnit string
- RetentionValue int
- frequencyInterval Integer
- frequencyType String
- id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- retentionUnit String
- retentionValue Integer
- frequencyInterval number
- frequencyType string
- id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- retentionUnit string
- retentionValue number
- frequency_interval int
- frequency_type str
- id str
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- retention_unit str
- retention_value int
- frequencyInterval Number
- frequencyType String
- id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- retentionUnit String
- retentionValue Number
GetDataLakePipelineSink    
- PartitionFields List<GetData Lake Pipeline Sink Partition Field> 
- Ordered fields used to physically organize data in the destination.- partition_fields.#.field_name- Human-readable label that identifies the field name used to partition data.
- partition_fields.#.order- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
 
- Provider string
- Target cloud provider for this Data Lake Pipeline.
- Region string
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- PartitionFields []GetData Lake Pipeline Sink Partition Field 
- Ordered fields used to physically organize data in the destination.- partition_fields.#.field_name- Human-readable label that identifies the field name used to partition data.
- partition_fields.#.order- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
 
- Provider string
- Target cloud provider for this Data Lake Pipeline.
- Region string
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- partitionFields List<GetData Lake Pipeline Sink Partition Field> 
- Ordered fields used to physically organize data in the destination.- partition_fields.#.field_name- Human-readable label that identifies the field name used to partition data.
- partition_fields.#.order- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
 
- provider String
- Target cloud provider for this Data Lake Pipeline.
- region String
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- type String
- Type of ingestion source of this Data Lake Pipeline.
- partitionFields GetData Lake Pipeline Sink Partition Field[] 
- Ordered fields used to physically organize data in the destination.- partition_fields.#.field_name- Human-readable label that identifies the field name used to partition data.
- partition_fields.#.order- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
 
- provider string
- Target cloud provider for this Data Lake Pipeline.
- region string
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- type string
- Type of ingestion source of this Data Lake Pipeline.
- partition_fields Sequence[GetData Lake Pipeline Sink Partition Field] 
- Ordered fields used to physically organize data in the destination.- partition_fields.#.field_name- Human-readable label that identifies the field name used to partition data.
- partition_fields.#.order- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
 
- provider str
- Target cloud provider for this Data Lake Pipeline.
- region str
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- type str
- Type of ingestion source of this Data Lake Pipeline.
- partitionFields List<Property Map>
- Ordered fields used to physically organize data in the destination.- partition_fields.#.field_name- Human-readable label that identifies the field name used to partition data.
- partition_fields.#.order- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
 
- provider String
- Target cloud provider for this Data Lake Pipeline.
- region String
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- type String
- Type of ingestion source of this Data Lake Pipeline.
GetDataLakePipelineSinkPartitionField      
- field_name str
- order int
GetDataLakePipelineSnapshot    
- CopyRegion string
- CreatedAt string
- ExpiresAt string
- FrequencyType string
- Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- MasterKey string
- MongodVersion string
- Policies List<string>
- Provider string
- Target cloud provider for this Data Lake Pipeline.
- ReplicaSetName string
- Size int
- Status string
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- CopyRegion string
- CreatedAt string
- ExpiresAt string
- FrequencyType string
- Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- MasterKey string
- MongodVersion string
- Policies []string
- Provider string
- Target cloud provider for this Data Lake Pipeline.
- ReplicaSetName string
- Size int
- Status string
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- copyRegion String
- createdAt String
- expiresAt String
- frequencyType String
- id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- masterKey String
- mongodVersion String
- policies List<String>
- provider String
- Target cloud provider for this Data Lake Pipeline.
- replicaSetName String
- size Integer
- status String
- type String
- Type of ingestion source of this Data Lake Pipeline.
- copyRegion string
- createdAt string
- expiresAt string
- frequencyType string
- id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- masterKey string
- mongodVersion string
- policies string[]
- provider string
- Target cloud provider for this Data Lake Pipeline.
- replicaSetName string
- size number
- status string
- type string
- Type of ingestion source of this Data Lake Pipeline.
- copy_region str
- created_at str
- expires_at str
- frequency_type str
- id str
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- master_key str
- mongod_version str
- policies Sequence[str]
- provider str
- Target cloud provider for this Data Lake Pipeline.
- replica_set_name str
- size int
- status str
- type str
- Type of ingestion source of this Data Lake Pipeline.
- copyRegion String
- createdAt String
- expiresAt String
- frequencyType String
- id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- masterKey String
- mongodVersion String
- policies List<String>
- provider String
- Target cloud provider for this Data Lake Pipeline.
- replicaSetName String
- size Number
- status String
- type String
- Type of ingestion source of this Data Lake Pipeline.
GetDataLakePipelineSource    
- ClusterName string
- Human-readable name that identifies the cluster.
- CollectionName string
- Human-readable name that identifies the collection.
- DatabaseName string
- Human-readable name that identifies the database.
- ProjectId string
- The unique ID for the project to create a Data Lake Pipeline.
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- ClusterName string
- Human-readable name that identifies the cluster.
- CollectionName string
- Human-readable name that identifies the collection.
- DatabaseName string
- Human-readable name that identifies the database.
- ProjectId string
- The unique ID for the project to create a Data Lake Pipeline.
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- clusterName String
- Human-readable name that identifies the cluster.
- collectionName String
- Human-readable name that identifies the collection.
- databaseName String
- Human-readable name that identifies the database.
- projectId String
- The unique ID for the project to create a Data Lake Pipeline.
- type String
- Type of ingestion source of this Data Lake Pipeline.
- clusterName string
- Human-readable name that identifies the cluster.
- collectionName string
- Human-readable name that identifies the collection.
- databaseName string
- Human-readable name that identifies the database.
- projectId string
- The unique ID for the project to create a Data Lake Pipeline.
- type string
- Type of ingestion source of this Data Lake Pipeline.
- cluster_name str
- Human-readable name that identifies the cluster.
- collection_name str
- Human-readable name that identifies the collection.
- database_name str
- Human-readable name that identifies the database.
- project_id str
- The unique ID for the project to create a Data Lake Pipeline.
- type str
- Type of ingestion source of this Data Lake Pipeline.
- clusterName String
- Human-readable name that identifies the cluster.
- collectionName String
- Human-readable name that identifies the collection.
- databaseName String
- Human-readable name that identifies the database.
- projectId String
- The unique ID for the project to create a Data Lake Pipeline.
- type String
- Type of ingestion source of this Data Lake Pipeline.
GetDataLakePipelineTransformation    
Package Details
- Repository
- MongoDB Atlas pulumi/pulumi-mongodbatlas
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the mongodbatlas Terraform Provider.