aiven.KafkaConnect
Creates and manages an Aiven for Apache Kafka® Connect service. Kafka Connect lets you integrate an Aiven for Apache Kafka® service with external data sources using connectors.
To set up and integrate Kafka Connect:
- Create a Kafka service in the same Aiven project using the aiven.Kafka resource.
- Create topics for importing and exporting data using aiven.KafkaTopic.
- Create the Kafka Connect service.
- Use the aiven.ServiceIntegration resource to integrate the Kafka and Kafka Connect services.
- Add source and sink connectors using the aiven.KafkaConnector resource, as shown in the sketch after the examples below.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";
// Create a Kafka service.
const exampleKafka = new aiven.Kafka("example_kafka", {
    project: exampleProject.project,
    serviceName: "example-kafka-service",
    cloudName: "google-europe-west1",
    plan: "startup-2",
});
// Create a Kafka Connect service.
const exampleKafkaConnect = new aiven.KafkaConnect("example_kafka_connect", {
    project: exampleProject.project,
    cloudName: "google-europe-west1",
    plan: "startup-4",
    serviceName: "example-connect-service",
    kafkaConnectUserConfig: {
        kafkaConnect: {
            consumerIsolationLevel: "read_committed",
        },
        publicAccess: {
            kafkaConnect: true,
        },
    },
});
// Integrate the Kafka and Kafka Connect services.
const kafkaConnectIntegration = new aiven.ServiceIntegration("kafka_connect_integration", {
    project: exampleProject.project,
    integrationType: "kafka_connect",
    sourceServiceName: exampleKafka.serviceName,
    destinationServiceName: exampleKafkaConnect.serviceName,
    kafkaConnectUserConfig: {
        kafkaConnect: {
            groupId: "connect",
            statusStorageTopic: "__connect_status",
            offsetStorageTopic: "__connect_offsets",
        },
    },
});
import pulumi
import pulumi_aiven as aiven
# Create a Kafka service.
example_kafka = aiven.Kafka("example_kafka",
    project=example_project["project"],
    service_name="example-kafka-service",
    cloud_name="google-europe-west1",
    plan="startup-2")
# Create a Kafka Connect service.
example_kafka_connect = aiven.KafkaConnect("example_kafka_connect",
    project=example_project["project"],
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-connect-service",
    kafka_connect_user_config={
        "kafka_connect": {
            "consumer_isolation_level": "read_committed",
        },
        "public_access": {
            "kafka_connect": True,
        },
    })
# Integrate the Kafka and Kafka Connect services.
kafka_connect_integration = aiven.ServiceIntegration("kafka_connect_integration",
    project=example_project["project"],
    integration_type="kafka_connect",
    source_service_name=example_kafka.service_name,
    destination_service_name=example_kafka_connect.service_name,
    kafka_connect_user_config={
        "kafka_connect": {
            "group_id": "connect",
            "status_storage_topic": "__connect_status",
            "offset_storage_topic": "__connect_offsets",
        },
    })
package main
import (
	"github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Create a Kafka service.
		exampleKafka, err := aiven.NewKafka(ctx, "example_kafka", &aiven.KafkaArgs{
			Project:     pulumi.Any(exampleProject.Project),
			ServiceName: pulumi.String("example-kafka-service"),
			CloudName:   pulumi.String("google-europe-west1"),
			Plan:        pulumi.String("startup-2"),
		})
		if err != nil {
			return err
		}
		// Create a Kafka Connect service.
		exampleKafkaConnect, err := aiven.NewKafkaConnect(ctx, "example_kafka_connect", &aiven.KafkaConnectArgs{
			Project:     pulumi.Any(exampleProject.Project),
			CloudName:   pulumi.String("google-europe-west1"),
			Plan:        pulumi.String("startup-4"),
			ServiceName: pulumi.String("example-connect-service"),
			KafkaConnectUserConfig: &aiven.KafkaConnectKafkaConnectUserConfigArgs{
				KafkaConnect: &aiven.KafkaConnectKafkaConnectUserConfigKafkaConnectArgs{
					ConsumerIsolationLevel: pulumi.String("read_committed"),
				},
				PublicAccess: &aiven.KafkaConnectKafkaConnectUserConfigPublicAccessArgs{
					KafkaConnect: pulumi.Bool(true),
				},
			},
		})
		if err != nil {
			return err
		}
		// Integrate the Kafka and Kafka Connect services.
		_, err = aiven.NewServiceIntegration(ctx, "kafka_connect_integration", &aiven.ServiceIntegrationArgs{
			Project:                pulumi.Any(exampleProject.Project),
			IntegrationType:        pulumi.String("kafka_connect"),
			SourceServiceName:      exampleKafka.ServiceName,
			DestinationServiceName: exampleKafkaConnect.ServiceName,
			KafkaConnectUserConfig: &aiven.ServiceIntegrationKafkaConnectUserConfigArgs{
				KafkaConnect: &aiven.ServiceIntegrationKafkaConnectUserConfigKafkaConnectArgs{
					GroupId:            pulumi.String("connect"),
					StatusStorageTopic: pulumi.String("__connect_status"),
					OffsetStorageTopic: pulumi.String("__connect_offsets"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aiven = Pulumi.Aiven;
return await Deployment.RunAsync(() => 
{
    // Create a Kafka service.
    var exampleKafka = new Aiven.Kafka("example_kafka", new()
    {
        Project = exampleProject.Project,
        ServiceName = "example-kafka-service",
        CloudName = "google-europe-west1",
        Plan = "startup-2",
    });
    // Create a Kafka Connect service.
    var exampleKafkaConnect = new Aiven.KafkaConnect("example_kafka_connect", new()
    {
        Project = exampleProject.Project,
        CloudName = "google-europe-west1",
        Plan = "startup-4",
        ServiceName = "example-connect-service",
        KafkaConnectUserConfig = new Aiven.Inputs.KafkaConnectKafkaConnectUserConfigArgs
        {
            KafkaConnect = new Aiven.Inputs.KafkaConnectKafkaConnectUserConfigKafkaConnectArgs
            {
                ConsumerIsolationLevel = "read_committed",
            },
            PublicAccess = new Aiven.Inputs.KafkaConnectKafkaConnectUserConfigPublicAccessArgs
            {
                KafkaConnect = true,
            },
        },
    });
    // Integrate the Kafka and Kafka Connect services.
    var kafkaConnectIntegration = new Aiven.ServiceIntegration("kafka_connect_integration", new()
    {
        Project = exampleProject.Project,
        IntegrationType = "kafka_connect",
        SourceServiceName = exampleKafka.ServiceName,
        DestinationServiceName = exampleKafkaConnect.ServiceName,
        KafkaConnectUserConfig = new Aiven.Inputs.ServiceIntegrationKafkaConnectUserConfigArgs
        {
            KafkaConnect = new Aiven.Inputs.ServiceIntegrationKafkaConnectUserConfigKafkaConnectArgs
            {
                GroupId = "connect",
                StatusStorageTopic = "__connect_status",
                OffsetStorageTopic = "__connect_offsets",
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aiven.Kafka;
import com.pulumi.aiven.KafkaArgs;
import com.pulumi.aiven.KafkaConnect;
import com.pulumi.aiven.KafkaConnectArgs;
import com.pulumi.aiven.inputs.KafkaConnectKafkaConnectUserConfigArgs;
import com.pulumi.aiven.inputs.KafkaConnectKafkaConnectUserConfigKafkaConnectArgs;
import com.pulumi.aiven.inputs.KafkaConnectKafkaConnectUserConfigPublicAccessArgs;
import com.pulumi.aiven.ServiceIntegration;
import com.pulumi.aiven.ServiceIntegrationArgs;
import com.pulumi.aiven.inputs.ServiceIntegrationKafkaConnectUserConfigArgs;
import com.pulumi.aiven.inputs.ServiceIntegrationKafkaConnectUserConfigKafkaConnectArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // Create a Kafka service.
        var exampleKafka = new Kafka("exampleKafka", KafkaArgs.builder()
            .project(exampleProject.project())
            .serviceName("example-kafka-service")
            .cloudName("google-europe-west1")
            .plan("startup-2")
            .build());
        // Create a Kafka Connect service.
        var exampleKafkaConnect = new KafkaConnect("exampleKafkaConnect", KafkaConnectArgs.builder()
            .project(exampleProject.project())
            .cloudName("google-europe-west1")
            .plan("startup-4")
            .serviceName("example-connect-service")
            .kafkaConnectUserConfig(KafkaConnectKafkaConnectUserConfigArgs.builder()
                .kafkaConnect(KafkaConnectKafkaConnectUserConfigKafkaConnectArgs.builder()
                    .consumerIsolationLevel("read_committed")
                    .build())
                .publicAccess(KafkaConnectKafkaConnectUserConfigPublicAccessArgs.builder()
                    .kafkaConnect(true)
                    .build())
                .build())
            .build());
        // Integrate the Kafka and Kafka Connect services.
        var kafkaConnectIntegration = new ServiceIntegration("kafkaConnectIntegration", ServiceIntegrationArgs.builder()
            .project(exampleProject.project())
            .integrationType("kafka_connect")
            .sourceServiceName(exampleKafka.serviceName())
            .destinationServiceName(exampleKafkaConnect.serviceName())
            .kafkaConnectUserConfig(ServiceIntegrationKafkaConnectUserConfigArgs.builder()
                .kafkaConnect(ServiceIntegrationKafkaConnectUserConfigKafkaConnectArgs.builder()
                    .groupId("connect")
                    .statusStorageTopic("__connect_status")
                    .offsetStorageTopic("__connect_offsets")
                    .build())
                .build())
            .build());
    }
}
resources:
  # Create a Kafka service.
  exampleKafka:
    type: aiven:Kafka
    name: example_kafka
    properties:
      project: ${exampleProject.project}
      serviceName: example-kafka-service
      cloudName: google-europe-west1
      plan: startup-2
  # Create a Kafka Connect service.
  exampleKafkaConnect:
    type: aiven:KafkaConnect
    name: example_kafka_connect
    properties:
      project: ${exampleProject.project}
      cloudName: google-europe-west1
      plan: startup-4
      serviceName: example-connect-service
      kafkaConnectUserConfig:
        kafkaConnect:
          consumerIsolationLevel: read_committed
        publicAccess:
          kafkaConnect: true
  # Integrate the Kafka and Kafka Connect services.
  kafkaConnectIntegration:
    type: aiven:ServiceIntegration
    name: kafka_connect_integration
    properties:
      project: ${exampleProject.project}
      integrationType: kafka_connect
      sourceServiceName: ${exampleKafka.serviceName}
      destinationServiceName: ${exampleKafkaConnect.serviceName}
      kafkaConnectUserConfig:
        kafkaConnect:
          groupId: connect
          statusStorageTopic: __connect_status
          offsetStorageTopic: __connect_offsets
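The examples above cover creating the services and the integration. The remaining steps, creating topics with aiven.KafkaTopic and adding connectors with aiven.KafkaConnector, might look like the following TypeScript sketch. The connector class and its config keys are illustrative placeholders, not a recommendation; check the documentation for your connector.
import * as aiven from "@pulumi/aiven";
// Create a topic on the Kafka service for the connector to use.
const exampleTopic = new aiven.KafkaTopic("example_topic", {
    project: exampleProject.project,
    serviceName: exampleKafka.serviceName,
    topicName: "example-topic",
    partitions: 3,
    replication: 2,
});
// Add a connector to the Kafka Connect service. The connector class and the
// config keys below are placeholders for your actual connector settings.
const exampleConnector = new aiven.KafkaConnector("example_connector", {
    project: exampleProject.project,
    serviceName: exampleKafkaConnect.serviceName,
    connectorName: "example-sink-connector",
    config: {
        name: "example-sink-connector",
        "connector.class": "io.aiven.connect.jdbc.JdbcSinkConnector",
        topics: exampleTopic.topicName,
    },
}, {
    // Create the connector only after the services are integrated.
    dependsOn: [kafkaConnectIntegration],
});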
Create KafkaConnect Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new KafkaConnect(name: string, args: KafkaConnectArgs, opts?: CustomResourceOptions);
@overload
def KafkaConnect(resource_name: str,
                 args: KafkaConnectArgs,
                 opts: Optional[ResourceOptions] = None)
@overload
def KafkaConnect(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 plan: Optional[str] = None,
                 service_name: Optional[str] = None,
                 project: Optional[str] = None,
                 kafka_connect_user_config: Optional[KafkaConnectKafkaConnectUserConfigArgs] = None,
                 maintenance_window_dow: Optional[str] = None,
                 maintenance_window_time: Optional[str] = None,
                 additional_disk_space: Optional[str] = None,
                 disk_space: Optional[str] = None,
                 project_vpc_id: Optional[str] = None,
                 service_integrations: Optional[Sequence[KafkaConnectServiceIntegrationArgs]] = None,
                 cloud_name: Optional[str] = None,
                 static_ips: Optional[Sequence[str]] = None,
                 tags: Optional[Sequence[KafkaConnectTagArgs]] = None,
                 tech_emails: Optional[Sequence[KafkaConnectTechEmailArgs]] = None,
                 termination_protection: Optional[bool] = None)
func NewKafkaConnect(ctx *Context, name string, args KafkaConnectArgs, opts ...ResourceOption) (*KafkaConnect, error)
public KafkaConnect(string name, KafkaConnectArgs args, CustomResourceOptions? opts = null)
public KafkaConnect(String name, KafkaConnectArgs args)
public KafkaConnect(String name, KafkaConnectArgs args, CustomResourceOptions options)
type: aiven:KafkaConnect
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args KafkaConnectArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args KafkaConnectArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args KafkaConnectArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args KafkaConnectArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args KafkaConnectArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var kafkaConnectResource = new Aiven.KafkaConnect("kafkaConnectResource", new()
{
    Plan = "string",
    ServiceName = "string",
    Project = "string",
    KafkaConnectUserConfig = new Aiven.Inputs.KafkaConnectKafkaConnectUserConfigArgs
    {
        IpFilterObjects = new[]
        {
            new Aiven.Inputs.KafkaConnectKafkaConnectUserConfigIpFilterObjectArgs
            {
                Network = "string",
                Description = "string",
            },
        },
        IpFilterStrings = new[]
        {
            "string",
        },
        KafkaConnect = new Aiven.Inputs.KafkaConnectKafkaConnectUserConfigKafkaConnectArgs
        {
            ConnectorClientConfigOverridePolicy = "string",
            ConsumerAutoOffsetReset = "string",
            ConsumerFetchMaxBytes = 0,
            ConsumerIsolationLevel = "string",
            ConsumerMaxPartitionFetchBytes = 0,
            ConsumerMaxPollIntervalMs = 0,
            ConsumerMaxPollRecords = 0,
            OffsetFlushIntervalMs = 0,
            OffsetFlushTimeoutMs = 0,
            ProducerBatchSize = 0,
            ProducerBufferMemory = 0,
            ProducerCompressionType = "string",
            ProducerLingerMs = 0,
            ProducerMaxRequestSize = 0,
            ScheduledRebalanceMaxDelayMs = 0,
            SessionTimeoutMs = 0,
        },
        PluginVersions = new[]
        {
            new Aiven.Inputs.KafkaConnectKafkaConnectUserConfigPluginVersionArgs
            {
                PluginName = "string",
                Version = "string",
            },
        },
        PrivateAccess = new Aiven.Inputs.KafkaConnectKafkaConnectUserConfigPrivateAccessArgs
        {
            KafkaConnect = false,
            Prometheus = false,
        },
        PrivatelinkAccess = new Aiven.Inputs.KafkaConnectKafkaConnectUserConfigPrivatelinkAccessArgs
        {
            Jolokia = false,
            KafkaConnect = false,
            Prometheus = false,
        },
        PublicAccess = new Aiven.Inputs.KafkaConnectKafkaConnectUserConfigPublicAccessArgs
        {
            KafkaConnect = false,
            Prometheus = false,
        },
        SecretProviders = new[]
        {
            new Aiven.Inputs.KafkaConnectKafkaConnectUserConfigSecretProviderArgs
            {
                Name = "string",
                Aws = new Aiven.Inputs.KafkaConnectKafkaConnectUserConfigSecretProviderAwsArgs
                {
                    AuthMethod = "string",
                    Region = "string",
                    AccessKey = "string",
                    SecretKey = "string",
                },
                Vault = new Aiven.Inputs.KafkaConnectKafkaConnectUserConfigSecretProviderVaultArgs
                {
                    Address = "string",
                    AuthMethod = "string",
                    EngineVersion = 0,
                    PrefixPathDepth = 0,
                    Token = "string",
                },
            },
        },
        ServiceLog = false,
        StaticIps = false,
    },
    MaintenanceWindowDow = "string",
    MaintenanceWindowTime = "string",
    AdditionalDiskSpace = "string",
    ProjectVpcId = "string",
    ServiceIntegrations = new[]
    {
        new Aiven.Inputs.KafkaConnectServiceIntegrationArgs
        {
            IntegrationType = "string",
            SourceServiceName = "string",
        },
    },
    CloudName = "string",
    StaticIps = new[]
    {
        "string",
    },
    Tags = new[]
    {
        new Aiven.Inputs.KafkaConnectTagArgs
        {
            Key = "string",
            Value = "string",
        },
    },
    TechEmails = new[]
    {
        new Aiven.Inputs.KafkaConnectTechEmailArgs
        {
            Email = "string",
        },
    },
    TerminationProtection = false,
});
example, err := aiven.NewKafkaConnect(ctx, "kafkaConnectResource", &aiven.KafkaConnectArgs{
	Plan:        pulumi.String("string"),
	ServiceName: pulumi.String("string"),
	Project:     pulumi.String("string"),
	KafkaConnectUserConfig: &aiven.KafkaConnectKafkaConnectUserConfigArgs{
		IpFilterObjects: aiven.KafkaConnectKafkaConnectUserConfigIpFilterObjectArray{
			&aiven.KafkaConnectKafkaConnectUserConfigIpFilterObjectArgs{
				Network:     pulumi.String("string"),
				Description: pulumi.String("string"),
			},
		},
		IpFilterStrings: pulumi.StringArray{
			pulumi.String("string"),
		},
		KafkaConnect: &aiven.KafkaConnectKafkaConnectUserConfigKafkaConnectArgs{
			ConnectorClientConfigOverridePolicy: pulumi.String("string"),
			ConsumerAutoOffsetReset:             pulumi.String("string"),
			ConsumerFetchMaxBytes:               pulumi.Int(0),
			ConsumerIsolationLevel:              pulumi.String("string"),
			ConsumerMaxPartitionFetchBytes:      pulumi.Int(0),
			ConsumerMaxPollIntervalMs:           pulumi.Int(0),
			ConsumerMaxPollRecords:              pulumi.Int(0),
			OffsetFlushIntervalMs:               pulumi.Int(0),
			OffsetFlushTimeoutMs:                pulumi.Int(0),
			ProducerBatchSize:                   pulumi.Int(0),
			ProducerBufferMemory:                pulumi.Int(0),
			ProducerCompressionType:             pulumi.String("string"),
			ProducerLingerMs:                    pulumi.Int(0),
			ProducerMaxRequestSize:              pulumi.Int(0),
			ScheduledRebalanceMaxDelayMs:        pulumi.Int(0),
			SessionTimeoutMs:                    pulumi.Int(0),
		},
		PluginVersions: aiven.KafkaConnectKafkaConnectUserConfigPluginVersionArray{
			&aiven.KafkaConnectKafkaConnectUserConfigPluginVersionArgs{
				PluginName: pulumi.String("string"),
				Version:    pulumi.String("string"),
			},
		},
		PrivateAccess: &aiven.KafkaConnectKafkaConnectUserConfigPrivateAccessArgs{
			KafkaConnect: pulumi.Bool(false),
			Prometheus:   pulumi.Bool(false),
		},
		PrivatelinkAccess: &aiven.KafkaConnectKafkaConnectUserConfigPrivatelinkAccessArgs{
			Jolokia:      pulumi.Bool(false),
			KafkaConnect: pulumi.Bool(false),
			Prometheus:   pulumi.Bool(false),
		},
		PublicAccess: &aiven.KafkaConnectKafkaConnectUserConfigPublicAccessArgs{
			KafkaConnect: pulumi.Bool(false),
			Prometheus:   pulumi.Bool(false),
		},
		SecretProviders: aiven.KafkaConnectKafkaConnectUserConfigSecretProviderArray{
			&aiven.KafkaConnectKafkaConnectUserConfigSecretProviderArgs{
				Name: pulumi.String("string"),
				Aws: &aiven.KafkaConnectKafkaConnectUserConfigSecretProviderAwsArgs{
					AuthMethod: pulumi.String("string"),
					Region:     pulumi.String("string"),
					AccessKey:  pulumi.String("string"),
					SecretKey:  pulumi.String("string"),
				},
				Vault: &aiven.KafkaConnectKafkaConnectUserConfigSecretProviderVaultArgs{
					Address:         pulumi.String("string"),
					AuthMethod:      pulumi.String("string"),
					EngineVersion:   pulumi.Int(0),
					PrefixPathDepth: pulumi.Int(0),
					Token:           pulumi.String("string"),
				},
			},
		},
		ServiceLog: pulumi.Bool(false),
		StaticIps:  pulumi.Bool(false),
	},
	MaintenanceWindowDow:  pulumi.String("string"),
	MaintenanceWindowTime: pulumi.String("string"),
	AdditionalDiskSpace:   pulumi.String("string"),
	ProjectVpcId:          pulumi.String("string"),
	ServiceIntegrations: aiven.KafkaConnectServiceIntegrationArray{
		&aiven.KafkaConnectServiceIntegrationArgs{
			IntegrationType:   pulumi.String("string"),
			SourceServiceName: pulumi.String("string"),
		},
	},
	CloudName: pulumi.String("string"),
	StaticIps: pulumi.StringArray{
		pulumi.String("string"),
	},
	Tags: aiven.KafkaConnectTagArray{
		&aiven.KafkaConnectTagArgs{
			Key:   pulumi.String("string"),
			Value: pulumi.String("string"),
		},
	},
	TechEmails: aiven.KafkaConnectTechEmailArray{
		&aiven.KafkaConnectTechEmailArgs{
			Email: pulumi.String("string"),
		},
	},
	TerminationProtection: pulumi.Bool(false),
})
var kafkaConnectResource = new KafkaConnect("kafkaConnectResource", KafkaConnectArgs.builder()
    .plan("string")
    .serviceName("string")
    .project("string")
    .kafkaConnectUserConfig(KafkaConnectKafkaConnectUserConfigArgs.builder()
        .ipFilterObjects(KafkaConnectKafkaConnectUserConfigIpFilterObjectArgs.builder()
            .network("string")
            .description("string")
            .build())
        .ipFilterStrings("string")
        .kafkaConnect(KafkaConnectKafkaConnectUserConfigKafkaConnectArgs.builder()
            .connectorClientConfigOverridePolicy("string")
            .consumerAutoOffsetReset("string")
            .consumerFetchMaxBytes(0)
            .consumerIsolationLevel("string")
            .consumerMaxPartitionFetchBytes(0)
            .consumerMaxPollIntervalMs(0)
            .consumerMaxPollRecords(0)
            .offsetFlushIntervalMs(0)
            .offsetFlushTimeoutMs(0)
            .producerBatchSize(0)
            .producerBufferMemory(0)
            .producerCompressionType("string")
            .producerLingerMs(0)
            .producerMaxRequestSize(0)
            .scheduledRebalanceMaxDelayMs(0)
            .sessionTimeoutMs(0)
            .build())
        .pluginVersions(KafkaConnectKafkaConnectUserConfigPluginVersionArgs.builder()
            .pluginName("string")
            .version("string")
            .build())
        .privateAccess(KafkaConnectKafkaConnectUserConfigPrivateAccessArgs.builder()
            .kafkaConnect(false)
            .prometheus(false)
            .build())
        .privatelinkAccess(KafkaConnectKafkaConnectUserConfigPrivatelinkAccessArgs.builder()
            .jolokia(false)
            .kafkaConnect(false)
            .prometheus(false)
            .build())
        .publicAccess(KafkaConnectKafkaConnectUserConfigPublicAccessArgs.builder()
            .kafkaConnect(false)
            .prometheus(false)
            .build())
        .secretProviders(KafkaConnectKafkaConnectUserConfigSecretProviderArgs.builder()
            .name("string")
            .aws(KafkaConnectKafkaConnectUserConfigSecretProviderAwsArgs.builder()
                .authMethod("string")
                .region("string")
                .accessKey("string")
                .secretKey("string")
                .build())
            .vault(KafkaConnectKafkaConnectUserConfigSecretProviderVaultArgs.builder()
                .address("string")
                .authMethod("string")
                .engineVersion(0)
                .prefixPathDepth(0)
                .token("string")
                .build())
            .build())
        .serviceLog(false)
        .staticIps(false)
        .build())
    .maintenanceWindowDow("string")
    .maintenanceWindowTime("string")
    .additionalDiskSpace("string")
    .projectVpcId("string")
    .serviceIntegrations(KafkaConnectServiceIntegrationArgs.builder()
        .integrationType("string")
        .sourceServiceName("string")
        .build())
    .cloudName("string")
    .staticIps("string")
    .tags(KafkaConnectTagArgs.builder()
        .key("string")
        .value("string")
        .build())
    .techEmails(KafkaConnectTechEmailArgs.builder()
        .email("string")
        .build())
    .terminationProtection(false)
    .build());
kafka_connect_resource = aiven.KafkaConnect("kafkaConnectResource",
    plan="string",
    service_name="string",
    project="string",
    kafka_connect_user_config={
        "ip_filter_objects": [{
            "network": "string",
            "description": "string",
        }],
        "ip_filter_strings": ["string"],
        "kafka_connect": {
            "connector_client_config_override_policy": "string",
            "consumer_auto_offset_reset": "string",
            "consumer_fetch_max_bytes": 0,
            "consumer_isolation_level": "string",
            "consumer_max_partition_fetch_bytes": 0,
            "consumer_max_poll_interval_ms": 0,
            "consumer_max_poll_records": 0,
            "offset_flush_interval_ms": 0,
            "offset_flush_timeout_ms": 0,
            "producer_batch_size": 0,
            "producer_buffer_memory": 0,
            "producer_compression_type": "string",
            "producer_linger_ms": 0,
            "producer_max_request_size": 0,
            "scheduled_rebalance_max_delay_ms": 0,
            "session_timeout_ms": 0,
        },
        "plugin_versions": [{
            "plugin_name": "string",
            "version": "string",
        }],
        "private_access": {
            "kafka_connect": False,
            "prometheus": False,
        },
        "privatelink_access": {
            "jolokia": False,
            "kafka_connect": False,
            "prometheus": False,
        },
        "public_access": {
            "kafka_connect": False,
            "prometheus": False,
        },
        "secret_providers": [{
            "name": "string",
            "aws": {
                "auth_method": "string",
                "region": "string",
                "access_key": "string",
                "secret_key": "string",
            },
            "vault": {
                "address": "string",
                "auth_method": "string",
                "engine_version": 0,
                "prefix_path_depth": 0,
                "token": "string",
            },
        }],
        "service_log": False,
        "static_ips": False,
    },
    maintenance_window_dow="string",
    maintenance_window_time="string",
    additional_disk_space="string",
    project_vpc_id="string",
    service_integrations=[{
        "integration_type": "string",
        "source_service_name": "string",
    }],
    cloud_name="string",
    static_ips=["string"],
    tags=[{
        "key": "string",
        "value": "string",
    }],
    tech_emails=[{
        "email": "string",
    }],
    termination_protection=False)
const kafkaConnectResource = new aiven.KafkaConnect("kafkaConnectResource", {
    plan: "string",
    serviceName: "string",
    project: "string",
    kafkaConnectUserConfig: {
        ipFilterObjects: [{
            network: "string",
            description: "string",
        }],
        ipFilterStrings: ["string"],
        kafkaConnect: {
            connectorClientConfigOverridePolicy: "string",
            consumerAutoOffsetReset: "string",
            consumerFetchMaxBytes: 0,
            consumerIsolationLevel: "string",
            consumerMaxPartitionFetchBytes: 0,
            consumerMaxPollIntervalMs: 0,
            consumerMaxPollRecords: 0,
            offsetFlushIntervalMs: 0,
            offsetFlushTimeoutMs: 0,
            producerBatchSize: 0,
            producerBufferMemory: 0,
            producerCompressionType: "string",
            producerLingerMs: 0,
            producerMaxRequestSize: 0,
            scheduledRebalanceMaxDelayMs: 0,
            sessionTimeoutMs: 0,
        },
        pluginVersions: [{
            pluginName: "string",
            version: "string",
        }],
        privateAccess: {
            kafkaConnect: false,
            prometheus: false,
        },
        privatelinkAccess: {
            jolokia: false,
            kafkaConnect: false,
            prometheus: false,
        },
        publicAccess: {
            kafkaConnect: false,
            prometheus: false,
        },
        secretProviders: [{
            name: "string",
            aws: {
                authMethod: "string",
                region: "string",
                accessKey: "string",
                secretKey: "string",
            },
            vault: {
                address: "string",
                authMethod: "string",
                engineVersion: 0,
                prefixPathDepth: 0,
                token: "string",
            },
        }],
        serviceLog: false,
        staticIps: false,
    },
    maintenanceWindowDow: "string",
    maintenanceWindowTime: "string",
    additionalDiskSpace: "string",
    projectVpcId: "string",
    serviceIntegrations: [{
        integrationType: "string",
        sourceServiceName: "string",
    }],
    cloudName: "string",
    staticIps: ["string"],
    tags: [{
        key: "string",
        value: "string",
    }],
    techEmails: [{
        email: "string",
    }],
    terminationProtection: false,
});
type: aiven:KafkaConnect
properties:
    additionalDiskSpace: string
    cloudName: string
    kafkaConnectUserConfig:
        ipFilterObjects:
            - description: string
              network: string
        ipFilterStrings:
            - string
        kafkaConnect:
            connectorClientConfigOverridePolicy: string
            consumerAutoOffsetReset: string
            consumerFetchMaxBytes: 0
            consumerIsolationLevel: string
            consumerMaxPartitionFetchBytes: 0
            consumerMaxPollIntervalMs: 0
            consumerMaxPollRecords: 0
            offsetFlushIntervalMs: 0
            offsetFlushTimeoutMs: 0
            producerBatchSize: 0
            producerBufferMemory: 0
            producerCompressionType: string
            producerLingerMs: 0
            producerMaxRequestSize: 0
            scheduledRebalanceMaxDelayMs: 0
            sessionTimeoutMs: 0
        pluginVersions:
            - pluginName: string
              version: string
        privateAccess:
            kafkaConnect: false
            prometheus: false
        privatelinkAccess:
            jolokia: false
            kafkaConnect: false
            prometheus: false
        publicAccess:
            kafkaConnect: false
            prometheus: false
        secretProviders:
            - aws:
                accessKey: string
                authMethod: string
                region: string
                secretKey: string
              name: string
              vault:
                address: string
                authMethod: string
                engineVersion: 0
                prefixPathDepth: 0
                token: string
        serviceLog: false
        staticIps: false
    maintenanceWindowDow: string
    maintenanceWindowTime: string
    plan: string
    project: string
    projectVpcId: string
    serviceIntegrations:
        - integrationType: string
          sourceServiceName: string
    serviceName: string
    staticIps:
        - string
    tags:
        - key: string
          value: string
    techEmails:
        - email: string
    terminationProtection: false
KafkaConnect Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The KafkaConnect resource accepts the following input properties:
- Plan string
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan, such as the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string
- The name of the project this resource belongs to. To set up proper dependencies, please refer to this variable as a reference. Changing this property forces recreation of the resource.
- ServiceName string
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- AdditionalDiskSpace string
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services with no HA capabilities.
- CloudName string
- The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- DiskSpace string
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.
- KafkaConnectUserConfig KafkaConnectKafkaConnectUserConfig
- Kafka Connect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later.
- MaintenanceWindowDow string
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- ProjectVpcId string
- Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. A project can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceIntegrations List<KafkaConnectServiceIntegration>
- Service integrations to specify when creating a service. Not applied after initial service creation.
- StaticIps List<string>
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
- Tags List<KafkaConnectTag>
- Tags are key-value pairs that allow you to categorize services.
- TechEmails List<KafkaConnectTechEmail>
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- TerminationProtection bool
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
- Plan string
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan, such as the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string
- The name of the project this resource belongs to. To set up proper dependencies, please refer to this variable as a reference. Changing this property forces recreation of the resource.
- ServiceName string
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- AdditionalDiskSpace string
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services with no HA capabilities.
- CloudName string
- The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- DiskSpace string
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.
- KafkaConnectUserConfig KafkaConnectKafkaConnectUserConfigArgs
- Kafka Connect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later.
- MaintenanceWindowDow string
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- ProjectVpcId string
- Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. A project can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceIntegrations []KafkaConnectServiceIntegrationArgs
- Service integrations to specify when creating a service. Not applied after initial service creation.
- StaticIps []string
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
- Tags []KafkaConnectTagArgs
- Tags are key-value pairs that allow you to categorize services.
- TechEmails []KafkaConnectTechEmailArgs
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- TerminationProtection bool
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
- plan String
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan, such as the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- project String
- The name of the project this resource belongs to. To set up proper dependencies, please refer to this variable as a reference. Changing this property forces recreation of the resource.
- serviceName String
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- additionalDiskSpace String
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services with no HA capabilities.
- cloudName String
- The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- diskSpace String
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.
- kafkaConnectUserConfig KafkaConnectKafkaConnectUserConfig
- Kafka Connect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later.
- maintenanceWindowDow String
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime String
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- projectVpcId String
- Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. A project can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceIntegrations List<KafkaConnectServiceIntegration>
- Service integrations to specify when creating a service. Not applied after initial service creation.
- staticIps List<String>
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
- tags List<KafkaConnectTag>
- Tags are key-value pairs that allow you to categorize services.
- techEmails List<KafkaConnectTechEmail>
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection Boolean
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
- plan string
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan, such as the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- project string
- The name of the project this resource belongs to. To set up proper dependencies, please refer to this variable as a reference. Changing this property forces recreation of the resource.
- serviceName string
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- additionalDiskSpace string
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services with no HA capabilities.
- cloudName string
- The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- diskSpace string
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.
- kafkaConnectUserConfig KafkaConnectKafkaConnectUserConfig
- Kafka Connect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later.
- maintenanceWindowDow string
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime string
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- projectVpcId string
- Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. A project can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceIntegrations KafkaConnectServiceIntegration[]
- Service integrations to specify when creating a service. Not applied after initial service creation.
- staticIps string[]
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
- tags KafkaConnectTag[]
- Tags are key-value pairs that allow you to categorize services.
- techEmails KafkaConnectTechEmail[]
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection boolean
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
- plan str
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan, such as the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- project str
- The name of the project this resource belongs to. To set up proper dependencies, please refer to this variable as a reference. Changing this property forces recreation of the resource.
- service_name str
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- additional_disk_space str
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services with no HA capabilities.
- cloud_name str
- The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- disk_space str
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.
- kafka_connect_user_config KafkaConnectKafkaConnectUserConfigArgs
- Kafka Connect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later.
- maintenance_window_dow str
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenance_window_time str
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- project_vpc_id str
- Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. A project can be freely moved to and from a VPC after creation, but doing so triggers migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- service_integrations Sequence[KafkaConnectServiceIntegrationArgs]
- Service integrations to specify when creating a service. Not applied after initial service creation.
- static_ips Sequence[str]
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
- tags Sequence[KafkaConnectTagArgs]
- Tags are key-value pairs that allow you to categorize services.
- tech_emails Sequence[KafkaConnectTechEmailArgs]
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- termination_protection bool
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
- plan String
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan such as the new plan must have sufficient amount of disk space to store all current data and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist,startup-x,business-xandpremium-xwherexis (roughly) the amount of memory on each node (also other attributes like number of CPUs and amount of disk space varies but naming is based on memory). The available options can be seen from the Aiven pricing page.
- project String
- The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
- serviceName String
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
- additionalDisk StringSpace 
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart and there might be a short downtime for services with no HA capabilities.
- cloudName String
- The cloud provider and region the service is hosted in. The format is provider-region, for example:google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- diskSpace String
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Therefore, reducing will result in the service rebalancing.
- kafkaConnect Property MapUser Config 
- KafkaConnect user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
- maintenanceWindow StringDow 
- Day of week when maintenance operations should be performed. One monday, tuesday, wednesday, etc.
- maintenanceWindow StringTime 
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- projectVpc StringId 
- Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
- serviceIntegrations List<Property Map>
- Service integrations to specify when creating a service. Not applied after initial service creation
- staticIps List<String>
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
- List<Property Map>
- Tags are key-value pairs that allow you to categorize services.
- techEmails List<Property Map>
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection Boolean
- Prevents the service from being deleted. It is recommended to set this to truefor all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics but for services with backups much of the content can at least be restored from backup in case accidental deletion is done.
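As a quick illustration of how the optional inputs above fit together, here is a minimal TypeScript sketch. All literal values are placeholders rather than recommendations, and the key/value tag shape assumes the KafkaConnectTag supporting type:

import * as aiven from "@pulumi/aiven";

// A Kafka Connect service using several of the optional inputs above.
// Project, plan, region, window, and tag values are illustrative placeholders.
const connect = new aiven.KafkaConnect("example_connect", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "startup-4",
    serviceName: "example-connect-service",
    maintenanceWindowDow: "sunday",      // day of week for maintenance
    maintenanceWindowTime: "03:00:00",   // UTC time in HH:mm:ss format
    terminationProtection: true,         // recommended for production services
    tags: [{ key: "env", value: "staging" }],
});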
Outputs
All input properties are implicitly available as output properties. Additionally, the KafkaConnect resource produces the following output properties:
- Components List<KafkaConnectComponent>
- Service component information objects.
- DiskSpaceCap string
- The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string
- The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- DiskSpaceStep string
- The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- DiskSpaceUsed string
- Disk space that the service is currently using.
- Id string
- The provider-assigned unique ID for this managed resource.
- ServiceHost string
- The hostname of the service.
- ServicePassword string
- Password used for connecting to the service, if applicable.
- ServicePort int
- The port of the service.
- ServiceType string
- Aiven internal service type code.
- ServiceUri string
- URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- ServiceUsername string
- Username used for connecting to the service, if applicable.
- State string
- Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- Components []KafkaConnectComponent
- Service component information objects.
- DiskSpaceCap string
- The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string
- The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- DiskSpaceStep string
- The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- DiskSpaceUsed string
- Disk space that the service is currently using.
- Id string
- The provider-assigned unique ID for this managed resource.
- ServiceHost string
- The hostname of the service.
- ServicePassword string
- Password used for connecting to the service, if applicable.
- ServicePort int
- The port of the service.
- ServiceType string
- Aiven internal service type code.
- ServiceUri string
- URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- ServiceUsername string
- Username used for connecting to the service, if applicable.
- State string
- Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- components List<KafkaConnectComponent>
- Service component information objects.
- diskSpaceCap String
- The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String
- The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- diskSpaceStep String
- The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- diskSpaceUsed String
- Disk space that the service is currently using.
- id String
- The provider-assigned unique ID for this managed resource.
- serviceHost String
- The hostname of the service.
- servicePassword String
- Password used for connecting to the service, if applicable.
- servicePort Integer
- The port of the service.
- serviceType String
- Aiven internal service type code.
- serviceUri String
- URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername String
- Username used for connecting to the service, if applicable.
- state String
- Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- components KafkaConnectComponent[]
- Service component information objects.
- diskSpaceCap string
- The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault string
- The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- diskSpaceStep string
- The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- diskSpaceUsed string
- Disk space that the service is currently using.
- id string
- The provider-assigned unique ID for this managed resource.
- serviceHost string
- The hostname of the service.
- servicePassword string
- Password used for connecting to the service, if applicable.
- servicePort number
- The port of the service.
- serviceType string
- Aiven internal service type code.
- serviceUri string
- URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername string
- Username used for connecting to the service, if applicable.
- state string
- Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- components Sequence[KafkaConnectComponent]
- Service component information objects.
- disk_space_cap str
- The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
- disk_space_default str
- The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- disk_space_step str
- The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- disk_space_used str
- Disk space that the service is currently using.
- id str
- The provider-assigned unique ID for this managed resource.
- service_host str
- The hostname of the service.
- service_password str
- Password used for connecting to the service, if applicable.
- service_port int
- The port of the service.
- service_type str
- Aiven internal service type code.
- service_uri str
- URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- service_username str
- Username used for connecting to the service, if applicable.
- state str
- Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- components List<Property Map>
- Service component information objects.
- diskSpaceCap String
- The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String
- The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- diskSpaceStep String
- The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- diskSpaceUsed String
- Disk space that the service is currently using.
- id String
- The provider-assigned unique ID for this managed resource.
- serviceHost String
- The hostname of the service.
- servicePassword String
- Password used for connecting to the service, if applicable.
- servicePort Number
- The port of the service.
- serviceType String
- Aiven internal service type code.
- serviceUri String
- URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername String
- Username used for connecting to the service, if applicable.
- state String
- Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING. (See the export sketch after this list.)
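The output properties above are exposed on the resource instance as Pulumi outputs. A small TypeScript sketch, assuming a KafkaConnect resource named exampleKafkaConnect is defined in the same program:

import * as pulumi from "@pulumi/pulumi";

// Export connection details once the service is provisioned.
export const connectHost = exampleKafkaConnect.serviceHost;
export const connectPort = exampleKafkaConnect.servicePort;
export const connectState = exampleKafkaConnect.state;
// The URI may embed credentials, so mark it as a secret.
export const connectUri = pulumi.secret(exampleKafkaConnect.serviceUri);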
Look up Existing KafkaConnect Resource
Get an existing KafkaConnect resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: KafkaConnectState, opts?: CustomResourceOptions): KafkaConnect

@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        additional_disk_space: Optional[str] = None,
        cloud_name: Optional[str] = None,
        components: Optional[Sequence[KafkaConnectComponentArgs]] = None,
        disk_space: Optional[str] = None,
        disk_space_cap: Optional[str] = None,
        disk_space_default: Optional[str] = None,
        disk_space_step: Optional[str] = None,
        disk_space_used: Optional[str] = None,
        kafka_connect_user_config: Optional[KafkaConnectKafkaConnectUserConfigArgs] = None,
        maintenance_window_dow: Optional[str] = None,
        maintenance_window_time: Optional[str] = None,
        plan: Optional[str] = None,
        project: Optional[str] = None,
        project_vpc_id: Optional[str] = None,
        service_host: Optional[str] = None,
        service_integrations: Optional[Sequence[KafkaConnectServiceIntegrationArgs]] = None,
        service_name: Optional[str] = None,
        service_password: Optional[str] = None,
        service_port: Optional[int] = None,
        service_type: Optional[str] = None,
        service_uri: Optional[str] = None,
        service_username: Optional[str] = None,
        state: Optional[str] = None,
        static_ips: Optional[Sequence[str]] = None,
        tags: Optional[Sequence[KafkaConnectTagArgs]] = None,
        tech_emails: Optional[Sequence[KafkaConnectTechEmailArgs]] = None,
        termination_protection: Optional[bool] = None) -> KafkaConnect

func GetKafkaConnect(ctx *Context, name string, id IDInput, state *KafkaConnectState, opts ...ResourceOption) (*KafkaConnect, error)

public static KafkaConnect Get(string name, Input<string> id, KafkaConnectState? state, CustomResourceOptions? opts = null)

public static KafkaConnect get(String name, Output<String> id, KafkaConnectState state, CustomResourceOptions options)

resources:
  _:
    type: aiven:KafkaConnect
    get:
      id: ${id}

- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to look up.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
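For example, in TypeScript (a sketch: Aiven service IDs are typically the composite PROJECT_NAME/SERVICE_NAME, and the names below are placeholders):

import * as aiven from "@pulumi/aiven";

// Look up an existing Kafka Connect service by its provider ID.
// "my-project/example-connect-service" is a placeholder composite ID.
const existing = aiven.KafkaConnect.get(
    "existing-connect",
    "my-project/example-connect-service",
);

// The looked-up resource exposes the same outputs as a managed one.
export const existingServiceHost = existing.serviceHost;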
- AdditionalDiskSpace string
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without HA capabilities.
- CloudName string
- The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- Components List<KafkaConnectComponent>
- Service component information objects.
- DiskSpace string
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing disk space results in the service rebalancing.
- DiskSpaceCap string
- The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string
- The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- DiskSpaceStep string
- The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- DiskSpaceUsed string
- Disk space that the service is currently using.
- KafkaConnectUserConfig KafkaConnectKafkaConnectUserConfig
- KafkaConnect user configurable settings. Warning: there's no way to reset advanced configuration options to default. Options that you add cannot be removed later.
- MaintenanceWindowDow string
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- Plan string
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string
- The name of the project this resource belongs to. To set up proper dependencies, please refer to this variable as a reference. Changing this property forces recreation of the resource.
- ProjectVpcId string
- Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceHost string
- The hostname of the service.
- ServiceIntegrations List<KafkaConnectServiceIntegration>
- Service integrations to specify when creating a service. Not applied after initial service creation.
- ServiceName string
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- ServicePassword string
- Password used for connecting to the service, if applicable.
- ServicePort int
- The port of the service.
- ServiceType string
- Aiven internal service type code.
- ServiceUri string
- URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- ServiceUsername string
- Username used for connecting to the service, if applicable.
- State string
- Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- StaticIps List<string>
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
- Tags List<KafkaConnectTag>
- Tags are key-value pairs that allow you to categorize services.
- TechEmails List<KafkaConnectTechEmail>
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- TerminationProtection bool
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted.
- AdditionalDiskSpace string
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without HA capabilities.
- CloudName string
- The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- Components []KafkaConnectComponentArgs
- Service component information objects.
- DiskSpace string
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing disk space results in the service rebalancing.
- DiskSpaceCap string
- The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
- DiskSpaceDefault string
- The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- DiskSpaceStep string
- The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- DiskSpaceUsed string
- Disk space that the service is currently using.
- KafkaConnectUserConfig KafkaConnectKafkaConnectUserConfigArgs
- KafkaConnect user configurable settings. Warning: there's no way to reset advanced configuration options to default. Options that you add cannot be removed later.
- MaintenanceWindowDow string
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- MaintenanceWindowTime string
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- Plan string
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- Project string
- The name of the project this resource belongs to. To set up proper dependencies, please refer to this variable as a reference. Changing this property forces recreation of the resource.
- ProjectVpcId string
- Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- ServiceHost string
- The hostname of the service.
- ServiceIntegrations []KafkaConnectServiceIntegrationArgs
- Service integrations to specify when creating a service. Not applied after initial service creation.
- ServiceName string
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- ServicePassword string
- Password used for connecting to the service, if applicable.
- ServicePort int
- The port of the service.
- ServiceType string
- Aiven internal service type code.
- ServiceUri string
- URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- ServiceUsername string
- Username used for connecting to the service, if applicable.
- State string
- Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- StaticIps []string
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
- Tags []KafkaConnectTagArgs
- Tags are key-value pairs that allow you to categorize services.
- TechEmails []KafkaConnectTechEmailArgs
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- TerminationProtection bool
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted.
- additionalDiskSpace String
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without HA capabilities.
- cloudName String
- The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- components List<KafkaConnectComponent>
- Service component information objects.
- diskSpace String
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing disk space results in the service rebalancing.
- diskSpaceCap String
- The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String
- The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- diskSpaceStep String
- The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- diskSpaceUsed String
- Disk space that the service is currently using.
- kafkaConnectUserConfig KafkaConnectKafkaConnectUserConfig
- KafkaConnect user configurable settings. Warning: there's no way to reset advanced configuration options to default. Options that you add cannot be removed later.
- maintenanceWindowDow String
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime String
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan String
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- project String
- The name of the project this resource belongs to. To set up proper dependencies, please refer to this variable as a reference. Changing this property forces recreation of the resource.
- projectVpcId String
- Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceHost String
- The hostname of the service.
- serviceIntegrations List<KafkaConnectServiceIntegration>
- Service integrations to specify when creating a service. Not applied after initial service creation.
- serviceName String
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- servicePassword String
- Password used for connecting to the service, if applicable.
- servicePort Integer
- The port of the service.
- serviceType String
- Aiven internal service type code.
- serviceUri String
- URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername String
- Username used for connecting to the service, if applicable.
- state String
- Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- staticIps List<String>
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
- tags List<KafkaConnectTag>
- Tags are key-value pairs that allow you to categorize services.
- techEmails List<KafkaConnectTechEmail>
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection Boolean
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted.
- additionalDiskSpace string
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without HA capabilities.
- cloudName string
- The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- components KafkaConnectComponent[]
- Service component information objects.
- diskSpace string
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing disk space results in the service rebalancing.
- diskSpaceCap string
- The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault string
- The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- diskSpaceStep string
- The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- diskSpaceUsed string
- Disk space that the service is currently using.
- kafkaConnectUserConfig KafkaConnectKafkaConnectUserConfig
- KafkaConnect user configurable settings. Warning: there's no way to reset advanced configuration options to default. Options that you add cannot be removed later.
- maintenanceWindowDow string
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime string
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan string
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- project string
- The name of the project this resource belongs to. To set up proper dependencies, please refer to this variable as a reference. Changing this property forces recreation of the resource.
- projectVpcId string
- Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceHost string
- The hostname of the service.
- serviceIntegrations KafkaConnectServiceIntegration[]
- Service integrations to specify when creating a service. Not applied after initial service creation.
- serviceName string
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- servicePassword string
- Password used for connecting to the service, if applicable.
- servicePort number
- The port of the service.
- serviceType string
- Aiven internal service type code.
- serviceUri string
- URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername string
- Username used for connecting to the service, if applicable.
- state string
- Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- staticIps string[]
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
- tags KafkaConnectTag[]
- Tags are key-value pairs that allow you to categorize services.
- techEmails KafkaConnectTechEmail[]
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection boolean
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted.
- additional_disk_space str
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without HA capabilities.
- cloud_name str
- The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- components Sequence[KafkaConnectComponentArgs]
- Service component information objects.
- disk_space str
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing disk space results in the service rebalancing.
- disk_space_cap str
- The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
- disk_space_default str
- The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- disk_space_step str
- The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- disk_space_used str
- Disk space that the service is currently using.
- kafka_connect_user_config KafkaConnectKafkaConnectUserConfigArgs
- KafkaConnect user configurable settings. Warning: there's no way to reset advanced configuration options to default. Options that you add cannot be removed later.
- maintenance_window_dow str
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenance_window_time str
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan str
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- project str
- The name of the project this resource belongs to. To set up proper dependencies, please refer to this variable as a reference. Changing this property forces recreation of the resource.
- project_vpc_id str
- Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- service_host str
- The hostname of the service.
- service_integrations Sequence[KafkaConnectServiceIntegrationArgs]
- Service integrations to specify when creating a service. Not applied after initial service creation.
- service_name str
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- service_password str
- Password used for connecting to the service, if applicable.
- service_port int
- The port of the service.
- service_type str
- Aiven internal service type code.
- service_uri str
- URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- service_username str
- Username used for connecting to the service, if applicable.
- state str
- Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- static_ips Sequence[str]
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
- tags Sequence[KafkaConnectTagArgs]
- Tags are key-value pairs that allow you to categorize services.
- tech_emails Sequence[KafkaConnectTechEmailArgs]
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- termination_protection bool
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted.
- additionalDiskSpace String
- Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without HA capabilities.
- cloudName String
- The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
- components List<Property Map>
- Service component information objects.
- diskSpace String
- Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing disk space results in the service rebalancing.
- diskSpaceCap String
- The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
- diskSpaceDefault String
- The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
- diskSpaceStep String
- The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increment from disk_space_default by increments of this size.
- diskSpaceUsed String
- Disk space that the service is currently using.
- kafkaConnectUserConfig Property Map
- KafkaConnect user configurable settings. Warning: there's no way to reset advanced configuration options to default. Options that you add cannot be removed later.
- maintenanceWindowDow String
- Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
- maintenanceWindowTime String
- Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
- plan String
- Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
- project String
- The name of the project this resource belongs to. To set up proper dependencies, please refer to this variable as a reference. Changing this property forces recreation of the resource.
- projectVpcId String
- Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved into and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
- serviceHost String
- The hostname of the service.
- serviceIntegrations List<Property Map>
- Service integrations to specify when creating a service. Not applied after initial service creation.
- serviceName String
- Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
- servicePassword String
- Password used for connecting to the service, if applicable.
- servicePort Number
- The port of the service.
- serviceType String
- Aiven internal service type code.
- serviceUri String
- URI for connecting to the service. Service-specific info is under "kafka", "pg", etc.
- serviceUsername String
- Username used for connecting to the service, if applicable.
- state String
- Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING.
- staticIps List<String>
- Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
- tags List<Property Map>
- Tags are key-value pairs that allow you to categorize services.
- techEmails List<Property Map>
- The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
- terminationProtection Boolean
- Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if the service is accidentally deleted. (See the lookup sketch after this list.)
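The state properties above can also be supplied as hints alongside the lookup ID. A TypeScript sketch with placeholder names; only pass state values you actually know to be correct:

import * as aiven from "@pulumi/aiven";

// Look up an existing service and provide known state values.
// Both the composite ID and the state fields below are placeholders.
const adopted = aiven.KafkaConnect.get(
    "adopted-connect",
    "my-project/example-connect-service",
    {
        project: "my-project",
        serviceName: "example-connect-service",
    },
);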
Supporting Types
KafkaConnectComponent, KafkaConnectComponentArgs      
- Component string
- Service component name
- ConnectionUri string
- Connection info for connecting to the service component. This is a combination of host and port.
- Host string
- Host name for connecting to the service component
- KafkaAuthenticationMethod string
- Kafka authentication method. This is a value specific to the 'kafka' service component
- Port int
- Port number for connecting to the service component
- Route string
- Network access route
- Ssl bool
- Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
- Usage string
- DNS usage name
- Component string
- Service component name
- ConnectionUri string
- Connection info for connecting to the service component. This is a combination of host and port.
- Host string
- Host name for connecting to the service component
- KafkaAuthenticationMethod string
- Kafka authentication method. This is a value specific to the 'kafka' service component
- Port int
- Port number for connecting to the service component
- Route string
- Network access route
- Ssl bool
- Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
- Usage string
- DNS usage name
- component String
- Service component name
- connectionUri String
- Connection info for connecting to the service component. This is a combination of host and port.
- host String
- Host name for connecting to the service component
- kafkaAuthenticationMethod String
- Kafka authentication method. This is a value specific to the 'kafka' service component
- port Integer
- Port number for connecting to the service component
- route String
- Network access route
- ssl Boolean
- Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
- usage String
- DNS usage name
- component string
- Service component name
- connectionUri string
- Connection info for connecting to the service component. This is a combination of host and port.
- host string
- Host name for connecting to the service component
- kafkaAuthenticationMethod string
- Kafka authentication method. This is a value specific to the 'kafka' service component
- port number
- Port number for connecting to the service component
- route string
- Network access route
- ssl boolean
- Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
- usage string
- DNS usage name
- component str
- Service component name
- connection_uri str
- Connection info for connecting to the service component. This is a combination of host and port.
- host str
- Host name for connecting to the service component
- kafka_authentication_method str
- Kafka authentication method. This is a value specific to the 'kafka' service component
- port int
- Port number for connecting to the service component
- route str
- Network access route
- ssl bool
- Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
- usage str
- DNS usage name
- component String
- Service component name
- connectionUri String
- Connection info for connecting to the service component. This is a combination of host and port.
- host String
- Host name for connecting to the service component
- kafkaAuthenticationMethod String
- Kafka authentication method. This is a value specific to the 'kafka' service component
- port Number
- Port number for connecting to the service component
- route String
- Network access route
- ssl Boolean
- Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
- usage String
- DNS usage name
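The components output can be used to recover connection details for a specific component. A TypeScript sketch, assuming the exampleKafkaConnect resource from the earlier example and that a component named kafka_connect is present (other component names may appear depending on enabled features):

// Find the kafka_connect component and derive a host:port endpoint.
const endpoint = exampleKafkaConnect.components.apply(components => {
    const c = components.find(x => x.component === "kafka_connect");
    return c ? `${c.host}:${c.port}` : undefined;
});
export const connectEndpoint = endpoint;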
KafkaConnectKafkaConnectUserConfig, KafkaConnectKafkaConnectUserConfigArgs            
- AdditionalBackupRegions string
- Additional Cloud Regions for Backup Replication.
- IpFilterObjects List<KafkaConnectKafkaConnectUserConfigIpFilterObject>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- IpFilterStrings List<string>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- IpFilters List<string>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- KafkaConnect KafkaConnectKafkaConnectUserConfigKafkaConnect
- Kafka Connect configuration values.
- PluginVersions List<KafkaConnectKafkaConnectUserConfigPluginVersion>
- The plugin selected by the user.
- PrivateAccess KafkaConnectKafkaConnectUserConfigPrivateAccess
- Allow access to selected service ports from private networks.
- PrivatelinkAccess KafkaConnectKafkaConnectUserConfigPrivatelinkAccess
- Allow access to selected service components through Privatelink.
- PublicAccess KafkaConnectKafkaConnectUserConfigPublicAccess
- Allow access to selected service ports from the public Internet.
- SecretProviders List<KafkaConnectKafkaConnectUserConfigSecretProvider>
- ServiceLog bool
- Store logs for the service so that they are available in the HTTP API and console.
- StaticIps bool
- Use static public IP addresses.
- AdditionalBackupRegions string
- Additional Cloud Regions for Backup Replication.
- IpFilterObjects []KafkaConnectKafkaConnectUserConfigIpFilterObject
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- IpFilterStrings []string
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- IpFilters []string
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- KafkaConnect KafkaConnectKafkaConnectUserConfigKafkaConnect
- Kafka Connect configuration values.
- PluginVersions []KafkaConnectKafkaConnectUserConfigPluginVersion
- The plugin selected by the user.
- PrivateAccess KafkaConnectKafkaConnectUserConfigPrivateAccess
- Allow access to selected service ports from private networks.
- PrivatelinkAccess KafkaConnectKafkaConnectUserConfigPrivatelinkAccess
- Allow access to selected service components through Privatelink.
- PublicAccess KafkaConnectKafkaConnectUserConfigPublicAccess
- Allow access to selected service ports from the public Internet.
- SecretProviders []KafkaConnectKafkaConnectUserConfigSecretProvider
- ServiceLog bool
- Store logs for the service so that they are available in the HTTP API and console.
- StaticIps bool
- Use static public IP addresses.
- additionalBackupRegions String
- Additional Cloud Regions for Backup Replication.
- ipFilterObjects List<KafkaConnectKafkaConnectUserConfigIpFilterObject>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilterStrings List<String>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilters List<String>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- kafkaConnect KafkaConnectKafkaConnectUserConfigKafkaConnect
- Kafka Connect configuration values.
- pluginVersions List<KafkaConnectKafkaConnectUserConfigPluginVersion>
- The plugin selected by the user.
- privateAccess KafkaConnectKafkaConnectUserConfigPrivateAccess
- Allow access to selected service ports from private networks.
- privatelinkAccess KafkaConnectKafkaConnectUserConfigPrivatelinkAccess
- Allow access to selected service components through Privatelink.
- publicAccess KafkaConnectKafkaConnectUserConfigPublicAccess
- Allow access to selected service ports from the public Internet.
- secretProviders List<KafkaConnectKafkaConnectUserConfigSecretProvider>
- serviceLog Boolean
- Store logs for the service so that they are available in the HTTP API and console.
- staticIps Boolean
- Use static public IP addresses.
- additionalBackupRegions string
- Additional Cloud Regions for Backup Replication.
- ipFilterObjects KafkaConnectKafkaConnectUserConfigIpFilterObject[]
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilterStrings string[]
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilters string[]
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- kafkaConnect KafkaConnectKafkaConnectUserConfigKafkaConnect
- Kafka Connect configuration values.
- pluginVersions KafkaConnectKafkaConnectUserConfigPluginVersion[]
- The plugin selected by the user.
- privateAccess KafkaConnectKafkaConnectUserConfigPrivateAccess
- Allow access to selected service ports from private networks.
- privatelinkAccess KafkaConnectKafkaConnectUserConfigPrivatelinkAccess
- Allow access to selected service components through Privatelink.
- publicAccess KafkaConnectKafkaConnectUserConfigPublicAccess
- Allow access to selected service ports from the public Internet.
- secretProviders KafkaConnectKafkaConnectUserConfigSecretProvider[]
- serviceLog boolean
- Store logs for the service so that they are available in the HTTP API and console.
- staticIps boolean
- Use static public IP addresses.
- additional_backup_regions str
- Additional Cloud Regions for Backup Replication.
- ip_filter_objects Sequence[KafkaConnectKafkaConnectUserConfigIpFilterObject]
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ip_filter_strings Sequence[str]
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ip_filters Sequence[str]
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- kafka_connect KafkaConnectKafkaConnectUserConfigKafkaConnect
- Kafka Connect configuration values.
- plugin_versions Sequence[KafkaConnectKafkaConnectUserConfigPluginVersion]
- The plugin selected by the user.
- private_access KafkaConnectKafkaConnectUserConfigPrivateAccess
- Allow access to selected service ports from private networks.
- privatelink_access KafkaConnectKafkaConnectUserConfigPrivatelinkAccess
- Allow access to selected service components through Privatelink.
- public_access KafkaConnectKafkaConnectUserConfigPublicAccess
- Allow access to selected service ports from the public Internet.
- secret_providers Sequence[KafkaConnectKafkaConnectUserConfigSecretProvider]
- service_log bool
- Store logs for the service so that they are available in the HTTP API and console.
- static_ips bool
- Use static public IP addresses.
- additionalBackupRegions String
- Additional Cloud Regions for Backup Replication.
- ipFilterObjects List<Property Map>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilterStrings List<String>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- ipFilters List<String>
- Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
- kafkaConnect Property Map
- Kafka Connect configuration values.
- pluginVersions List<Property Map>
- The plugin selected by the user.
- privateAccess Property Map
- Allow access to selected service ports from private networks.
- privatelinkAccess Property Map
- Allow access to selected service components through Privatelink.
- publicAccess Property Map
- Allow access to selected service ports from the public Internet.
- secretProviders List<Property Map>
- serviceLog Boolean
- Store logs for the service so that they are available in the HTTP API and console.
- staticIps Boolean
- Use static public IP addresses.
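A TypeScript sketch combining a few of these user config options. The project, service names, and CIDR blocks are placeholders:

import * as aiven from "@pulumi/aiven";

// Restrict incoming connections and keep service logs available.
// All literal values are illustrative placeholders.
const restricted = new aiven.KafkaConnect("restricted_connect", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "startup-4",
    serviceName: "restricted-connect-service",
    kafkaConnectUserConfig: {
        ipFilterStrings: ["10.20.0.0/16", "203.0.113.0/24"], // allow-list of CIDR blocks
        serviceLog: true, // keep logs available in the HTTP API and console
    },
});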
KafkaConnectKafkaConnectUserConfigIpFilterObject, KafkaConnectKafkaConnectUserConfigIpFilterObjectArgs                  
- Network string
- CIDR address block. Example: 10.20.0.0/16.
- Description string
- Description for IP filter list entry. Example: Production service IP range.
- Network string
- CIDR address block. Example: 10.20.0.0/16.
- Description string
- Description for IP filter list entry. Example: Production service IP range.
- network String
- CIDR address block. Example: 10.20.0.0/16.
- description String
- Description for IP filter list entry. Example: Production service IP range.
- network string
- CIDR address block. Example: 10.20.0.0/16.
- description string
- Description for IP filter list entry. Example: Production service IP range.
- network str
- CIDR address block. Example: 10.20.0.0/16.
- description str
- Description for IP filter list entry. Example: Production service IP range.
- network String
- CIDR address block. Example: 10.20.0.0/16.
- description String
- Description for IP filter list entry. Example: Production service IP range.
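When each allowed network needs its own description, the object form above can be used instead of plain strings. A short TypeScript sketch with placeholder values:

// Object-form IP filters pair a CIDR block with a free-form description.
// Pass this array as kafkaConnectUserConfig.ipFilterObjects on the resource.
const ipFilterObjects = [
    { network: "10.20.0.0/16", description: "Production service IP range" },
    { network: "203.0.113.0/24", description: "Office network" },
];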
KafkaConnectKafkaConnectUserConfigKafkaConnect, KafkaConnectKafkaConnectUserConfigKafkaConnectArgs                
- ConnectorClientConfigOverridePolicy string
- Enum: All, None. Defines what client configurations can be overridden by the connector. Default is None.
- ConsumerAutoOffsetReset string
- Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset no longer exists on the server. Default is earliest.
- ConsumerFetchMaxBytes int
- Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
- ConsumerIsolationLevel string
- Enum: read_committed, read_uncommitted. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- ConsumerMaxPartitionFetchBytes int
- Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- ConsumerMaxPollIntervalMs int
- The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- ConsumerMaxPollRecords int
- The maximum number of records returned in a single call to poll() (defaults to 500).
- OffsetFlushIntervalMs int
- The interval at which to try committing offsets for tasks (defaults to 60000).
- OffsetFlushTimeoutMs int
- Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- ProducerBatchSize int
- This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- ProducerBufferMemory int
- The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- ProducerCompressionType string
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- ProducerLingerMs int
- This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
- ProducerMaxRequestSize int
- This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- ScheduledRebalanceMaxDelayMs int
- The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- SessionTimeoutMs int
- The timeout in milliseconds used to detect failures when using Kafka's group management facilities (defaults to 10000).
- ConnectorClientConfigOverridePolicy string
- Enum: All, None. Defines what client configurations can be overridden by the connector. Default is None.
- ConsumerAutoOffsetReset string
- Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- ConsumerFetchMaxBytes int
- Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
- ConsumerIsolationLevel string
- Enum: read_committed, read_uncommitted. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- ConsumerMaxPartitionFetchBytes int
- Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- ConsumerMaxPollIntervalMs int
- The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- ConsumerMaxPollRecords int
- The maximum number of records returned in a single call to poll() (defaults to 500).
- OffsetFlushIntervalMs int
- The interval at which to try committing offsets for tasks (defaults to 60000).
- OffsetFlushTimeoutMs int
- Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- ProducerBatchSize int
- This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- ProducerBufferMemory int
- The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- ProducerCompressionType string
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- ProducerLingerMs int
- This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition, it will be sent immediately regardless of this setting. However, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
- ProducerMaxRequestSize int
- This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- ScheduledRebalanceMaxDelayMs int
- The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- SessionTimeoutMs int
- The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connectorClientConfigOverridePolicy String
- Enum: All, None. Defines what client configurations can be overridden by the connector. Default is None.
- consumerAutoOffsetReset String
- Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- consumerFetchMaxBytes Integer
- Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
- consumerIsolationLevel String
- Enum: read_committed, read_uncommitted. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumerMaxPartitionFetchBytes Integer
- Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- consumerMaxPollIntervalMs Integer
- The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumerMaxPollRecords Integer
- The maximum number of records returned in a single call to poll() (defaults to 500).
- offsetFlushIntervalMs Integer
- The interval at which to try committing offsets for tasks (defaults to 60000).
- offsetFlushTimeoutMs Integer
- Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producerBatchSize Integer
- This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- producerBufferMemory Integer
- The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producerCompressionType String
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producerLingerMs Integer
- This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition, it will be sent immediately regardless of this setting. However, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
- producerMaxRequestSize Integer
- This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- scheduledRebalanceMaxDelayMs Integer
- The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- sessionTimeoutMs Integer
- The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connectorClientConfigOverridePolicy string
- Enum: All, None. Defines what client configurations can be overridden by the connector. Default is None.
- consumerAutoOffsetReset string
- Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- consumerFetchMaxBytes number
- Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
- consumerIsolationLevel string
- Enum: read_committed, read_uncommitted. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumerMaxPartitionFetchBytes number
- Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- consumerMaxPollIntervalMs number
- The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumerMaxPollRecords number
- The maximum number of records returned in a single call to poll() (defaults to 500).
- offsetFlushIntervalMs number
- The interval at which to try committing offsets for tasks (defaults to 60000).
- offsetFlushTimeoutMs number
- Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producerBatchSize number
- This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- producerBufferMemory number
- The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producerCompressionType string
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producerLingerMs number
- This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition, it will be sent immediately regardless of this setting. However, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
- producerMaxRequestSize number
- This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- scheduledRebalanceMaxDelayMs number
- The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- sessionTimeoutMs number
- The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connector_client_config_override_policy str
- Enum: All, None. Defines what client configurations can be overridden by the connector. Default is None.
- consumer_auto_offset_reset str
- Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- consumer_fetch_max_bytes int
- Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
- consumer_isolation_level str
- Enum: read_committed, read_uncommitted. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumer_max_partition_fetch_bytes int
- Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- consumer_max_poll_interval_ms int
- The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumer_max_poll_records int
- The maximum number of records returned in a single call to poll() (defaults to 500).
- offset_flush_interval_ms int
- The interval at which to try committing offsets for tasks (defaults to 60000).
- offset_flush_timeout_ms int
- Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producer_batch_size int
- This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- producer_buffer_memory int
- The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producer_compression_type str
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producer_linger_ms int
- This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition, it will be sent immediately regardless of this setting. However, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
- producer_max_request_size int
- This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- scheduled_rebalance_max_delay_ms int
- The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- session_timeout_ms int
- The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
- connectorClientConfigOverridePolicy String
- Enum: All, None. Defines what client configurations can be overridden by the connector. Default is None.
- consumerAutoOffsetReset String
- Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- consumerFetchMaxBytes Number
- Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
- consumerIsolationLevel String
- Enum: read_committed, read_uncommitted. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- consumerMaxPartitionFetchBytes Number
- Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
- consumerMaxPollIntervalMs Number
- The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- consumerMaxPollRecords Number
- The maximum number of records returned in a single call to poll() (defaults to 500).
- offsetFlushIntervalMs Number
- The interval at which to try committing offsets for tasks (defaults to 60000).
- offsetFlushTimeoutMs Number
- Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
- producerBatchSize Number
- This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
- producerBufferMemory Number
- The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- producerCompressionType String
- Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none, which is the default and equivalent to no compression.
- producerLingerMs Number
- This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition, it will be sent immediately regardless of this setting. However, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
- producerMaxRequestSize Number
- This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
- scheduledRebalanceMaxDelayMs Number
- The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
- sessionTimeoutMs Number
- The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
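Combining a few of these worker settings, a minimal TypeScript sketch (the values are illustrative, not tuning recommendations):
import * as aiven from "@pulumi/aiven";
// Tune consumer polling, offset flushing, and producer compression for the Connect workers.
const tunedConnect = new aiven.KafkaConnect("tuned_connect", {
    project: "my-project", // illustrative project name
    cloudName: "google-europe-west1",
    plan: "startup-4",
    serviceName: "tuned-connect-service",
    kafkaConnectUserConfig: {
        kafkaConnect: {
            connectorClientConfigOverridePolicy: "All", // let connectors override client configs
            consumerIsolationLevel: "read_committed",
            consumerMaxPollRecords: 250,  // fewer records per poll() call
            offsetFlushIntervalMs: 30000, // commit task offsets every 30 seconds
            producerCompressionType: "zstd",
        },
    },
});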
KafkaConnectKafkaConnectUserConfigPluginVersion, KafkaConnectKafkaConnectUserConfigPluginVersionArgs                
- PluginName string
- The name of the plugin. Example: debezium-connector.
- Version string
- The version of the plugin. Example: 2.5.0.
- PluginName string
- The name of the plugin. Example: debezium-connector.
- Version string
- The version of the plugin. Example: 2.5.0.
- pluginName String
- The name of the plugin. Example: debezium-connector.
- version String
- The version of the plugin. Example: 2.5.0.
- pluginName string
- The name of the plugin. Example: debezium-connector.
- version string
- The version of the plugin. Example: 2.5.0.
- plugin_name str
- The name of the plugin. Example: debezium-connector.
- version str
- The version of the plugin. Example: 2.5.0.
- pluginName String
- The name of the plugin. Example: debezium-connector.
- version String
- The version of the plugin. Example: 2.5.0.
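For example, to pin a connector plugin to a specific version, a minimal TypeScript sketch reusing the plugin name and version from the examples above:
import * as aiven from "@pulumi/aiven";
// Pin the Debezium connector plugin to version 2.5.0.
const pinnedConnect = new aiven.KafkaConnect("pinned_connect", {
    project: "my-project", // illustrative project name
    cloudName: "google-europe-west1",
    plan: "startup-4",
    serviceName: "pinned-connect-service",
    kafkaConnectUserConfig: {
        pluginVersions: [
            { pluginName: "debezium-connector", version: "2.5.0" },
        ],
    },
});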
KafkaConnectKafkaConnectUserConfigPrivateAccess, KafkaConnectKafkaConnectUserConfigPrivateAccessArgs                
- KafkaConnect bool
- Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- Prometheus bool
- Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- KafkaConnect bool
- Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- Prometheus bool
- Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafkaConnect Boolean
- Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- prometheus Boolean
- Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafkaConnect boolean
- Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- prometheus boolean
- Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafka_connect bool
- Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- prometheus bool
- Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- kafkaConnect Boolean
- Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
- prometheus Boolean
- Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
KafkaConnectKafkaConnectUserConfigPrivatelinkAccess, KafkaConnectKafkaConnectUserConfigPrivatelinkAccessArgs                
- Jolokia bool
- Enable jolokia.
- KafkaConnect bool
- Enable kafka_connect.
- Prometheus bool
- Enable prometheus.
- Jolokia bool
- Enable jolokia.
- KafkaConnect bool
- Enable kafka_connect.
- Prometheus bool
- Enable prometheus.
- jolokia Boolean
- Enable jolokia.
- kafkaConnect Boolean
- Enable kafka_connect.
- prometheus Boolean
- Enable prometheus.
- jolokia boolean
- Enable jolokia.
- kafkaConnect boolean
- Enable kafka_connect.
- prometheus boolean
- Enable prometheus.
- jolokia bool
- Enable jolokia.
- kafka_connect bool
- Enable kafka_connect.
- prometheus bool
- Enable prometheus.
- jolokia Boolean
- Enable jolokia.
- kafkaConnect Boolean
- Enable kafka_connect.
- prometheus Boolean
- Enable prometheus.
KafkaConnectKafkaConnectUserConfigPublicAccess, KafkaConnectKafkaConnectUserConfigPublicAccessArgs                
- KafkaConnect bool
- Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- Prometheus bool
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- KafkaConnect bool
- Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- Prometheus bool
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- kafkaConnect Boolean
- Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- prometheus Boolean
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- kafkaConnect boolean
- Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- prometheus boolean
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- kafka_connect bool
- Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- prometheus bool
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
- kafkaConnect Boolean
- Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
- prometheus Boolean
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
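The three access blocks can be combined. A minimal TypeScript sketch that keeps the Kafka Connect API on private networking while exposing only Prometheus metrics publicly (names are illustrative):
import * as aiven from "@pulumi/aiven";
// Private DNS name for kafka_connect, PrivateLink for both components,
// and public access limited to Prometheus metrics.
const networkedConnect = new aiven.KafkaConnect("networked_connect", {
    project: "my-project", // illustrative project name
    cloudName: "google-europe-west1",
    plan: "startup-4",
    serviceName: "networked-connect-service",
    kafkaConnectUserConfig: {
        privateAccess: { kafkaConnect: true },
        privatelinkAccess: { kafkaConnect: true, prometheus: true },
        publicAccess: { prometheus: true },
    },
});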
KafkaConnectKafkaConnectUserConfigSecretProvider, KafkaConnectKafkaConnectUserConfigSecretProviderArgs                
- Name string
- Name of the secret provider. Used to reference secrets in connector config.
- Aws KafkaConnectKafkaConnectUserConfigSecretProviderAws
- AWS secret provider configuration
- Vault KafkaConnectKafkaConnectUserConfigSecretProviderVault
- Vault secret provider configuration
- Name string
- Name of the secret provider. Used to reference secrets in connector config.
- Aws KafkaConnectKafkaConnectUserConfigSecretProviderAws
- AWS secret provider configuration
- Vault KafkaConnectKafkaConnectUserConfigSecretProviderVault
- Vault secret provider configuration
- name String
- Name of the secret provider. Used to reference secrets in connector config.
- aws KafkaConnectKafkaConnectUserConfigSecretProviderAws
- AWS secret provider configuration
- vault KafkaConnectKafkaConnectUserConfigSecretProviderVault
- Vault secret provider configuration
- name string
- Name of the secret provider. Used to reference secrets in connector config.
- aws KafkaConnectKafkaConnectUserConfigSecretProviderAws
- AWS secret provider configuration
- vault KafkaConnectKafkaConnectUserConfigSecretProviderVault
- Vault secret provider configuration
- name str
- Name of the secret provider. Used to reference secrets in connector config.
- aws KafkaConnectKafkaConnectUserConfigSecretProviderAws
- AWS secret provider configuration
- vault KafkaConnectKafkaConnectUserConfigSecretProviderVault
- Vault secret provider configuration
- name String
- Name of the secret provider. Used to reference secrets in connector config.
- aws Property Map
- AWS secret provider configuration
- vault Property Map
- Vault secret provider configuration
KafkaConnectKafkaConnectUserConfigSecretProviderAws, KafkaConnectKafkaConnectUserConfigSecretProviderAwsArgs                  
- AuthMethod string
- Enum: credentials. Auth method of the AWS secret provider.
- Region string
- Region used to look up secrets with AWS Secrets Manager.
- AccessKey string
- Access key used to authenticate with AWS.
- SecretKey string
- Secret key used to authenticate with AWS.
- AuthMethod string
- Enum: credentials. Auth method of the AWS secret provider.
- Region string
- Region used to look up secrets with AWS Secrets Manager.
- AccessKey string
- Access key used to authenticate with AWS.
- SecretKey string
- Secret key used to authenticate with AWS.
- authMethod String
- Enum: credentials. Auth method of the AWS secret provider.
- region String
- Region used to look up secrets with AWS Secrets Manager.
- accessKey String
- Access key used to authenticate with AWS.
- secretKey String
- Secret key used to authenticate with AWS.
- authMethod string
- Enum: credentials. Auth method of the AWS secret provider.
- region string
- Region used to look up secrets with AWS Secrets Manager.
- accessKey string
- Access key used to authenticate with AWS.
- secretKey string
- Secret key used to authenticate with AWS.
- auth_method str
- Enum: credentials. Auth method of the AWS secret provider.
- region str
- Region used to look up secrets with AWS Secrets Manager.
- access_key str
- Access key used to authenticate with AWS.
- secret_key str
- Secret key used to authenticate with AWS.
- authMethod String
- Enum: credentials. Auth method of the AWS secret provider.
- region String
- Region used to look up secrets with AWS Secrets Manager.
- accessKey String
- Access key used to authenticate with AWS.
- secretKey String
- Secret key used to authenticate with AWS.
KafkaConnectKafkaConnectUserConfigSecretProviderVault, KafkaConnectKafkaConnectUserConfigSecretProviderVaultArgs                  
- Address string
- Address of the Vault server.
- AuthMethod string
- Enum: token. Auth method of the Vault secret provider.
- EngineVersion int
- Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- PrefixPathDepth int
- Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, it has to be increased to the number of segments.
- Token string
- Token used to authenticate with Vault when using the token auth method.
- Address string
- Address of the Vault server.
- AuthMethod string
- Enum: token. Auth method of the Vault secret provider.
- EngineVersion int
- Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- PrefixPathDepth int
- Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, it has to be increased to the number of segments.
- Token string
- Token used to authenticate with Vault when using the token auth method.
- address String
- Address of the Vault server.
- authMethod String
- Enum: token. Auth method of the Vault secret provider.
- engineVersion Integer
- Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- prefixPathDepth Integer
- Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, it has to be increased to the number of segments.
- token String
- Token used to authenticate with Vault when using the token auth method.
- address string
- Address of the Vault server.
- authMethod string
- Enum: token. Auth method of the Vault secret provider.
- engineVersion number
- Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- prefixPathDepth number
- Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, it has to be increased to the number of segments.
- token string
- Token used to authenticate with Vault when using the token auth method.
- address str
- Address of the Vault server.
- auth_method str
- Enum: token. Auth method of the Vault secret provider.
- engine_version int
- Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- prefix_path_depth int
- Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, it has to be increased to the number of segments.
- token str
- Token used to authenticate with Vault when using the token auth method.
- address String
- Address of the Vault server.
- authMethod String
- Enum: token. Auth method of the Vault secret provider.
- engineVersion Number
- Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
- prefixPathDepth Number
- Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, it has to be increased to the number of segments.
- token String
- Token used to authenticate with Vault when using the token auth method.
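Putting the secret provider pieces together, a minimal TypeScript sketch that registers a Vault provider using token authentication (the Vault address and the vaultToken config key are illustrative):
import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";
const config = new pulumi.Config();
// Register a Vault secret provider that connectors can reference by name.
const vaultBackedConnect = new aiven.KafkaConnect("vault_backed_connect", {
    project: "my-project", // illustrative project name
    cloudName: "google-europe-west1",
    plan: "startup-4",
    serviceName: "vault-backed-connect-service",
    kafkaConnectUserConfig: {
        secretProviders: [{
            name: "vault", // referenced from connector configurations
            vault: {
                address: "https://vault.example.com:8200",
                authMethod: "token",
                token: config.requireSecret("vaultToken"), // read as a Pulumi secret
            },
        }],
    },
});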
KafkaConnectServiceIntegration, KafkaConnectServiceIntegrationArgs        
- IntegrationType string
- Type of the service integration
- SourceServiceName string
- Name of the source service
- IntegrationType string
- Type of the service integration
- SourceServiceName string
- Name of the source service
- integrationType String
- Type of the service integration
- sourceServiceName String
- Name of the source service
- integrationType string
- Type of the service integration
- sourceServiceName string
- Name of the source service
- integration_type str
- Type of the service integration
- source_service_name str
- Name of the source service
- integrationType String
- Type of the service integration
- sourceServiceName String
- Name of the source service
KafkaConnectTag, KafkaConnectTagArgs      
KafkaConnectTechEmail, KafkaConnectTechEmailArgs        
- Email string
- An email address to contact for technical issues
- Email string
- An email address to contact for technical issues
- email String
- An email address to contact for technical issues
- email string
- An email address to contact for technical issues
- email str
- An email address to contact for technical issues
- email String
- An email address to contact for technical issues
Import
$ pulumi import aiven:index/kafkaConnect:KafkaConnect example_kafka_connect PROJECT/SERVICE_NAME
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Aiven pulumi/pulumi-aiven
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the aiven Terraform Provider.