Confluent v2.27.0 published on Friday, May 9, 2025 by Pulumi
confluentcloud.getTableflowTopic
confluentcloud.getTableflowTopic describes a Tableflow Topic data source.
Example Usage
Option #1: Manage multiple Tableflow Topics in the same Pulumi Stack
TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";

export = async () => {
    const example = await confluentcloud.getTableflowTopic({
        environment: {
            id: staging.id,
        },
        kafkaCluster: {
            id: stagingConfluentKafkaCluster.id,
        },
        displayName: "tableflow-example",
        credentials: {
            key: env_admin_tableflow_api_key.id,
            secret: env_admin_tableflow_api_key.secret,
        },
    });
    return {
        "retention-ms": example.retentionMs,
    };
}
Python

import pulumi
import pulumi_confluentcloud as confluentcloud

example = confluentcloud.get_tableflow_topic(
    environment={
        "id": staging["id"],
    },
    kafka_cluster={
        "id": staging_confluent_kafka_cluster["id"],
    },
    display_name="tableflow-example",
    credentials={
        "key": env_admin_tableflow_api_key["id"],
        "secret": env_admin_tableflow_api_key["secret"],
    })
pulumi.export("retention-ms", example.retention_ms)
Go

package main

import (
    "github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        example, err := confluentcloud.LookupTableflowTopic(ctx, &confluentcloud.LookupTableflowTopicArgs{
            Environment: confluentcloud.GetTableflowTopicEnvironment{
                Id: staging.Id,
            },
            KafkaCluster: confluentcloud.GetTableflowTopicKafkaCluster{
                Id: stagingConfluentKafkaCluster.Id,
            },
            DisplayName: "tableflow-example",
            Credentials: confluentcloud.GetTableflowTopicCredentials{
                Key:    env_admin_tableflow_api_key.Id,
                Secret: env_admin_tableflow_api_key.Secret,
            },
        }, nil)
        if err != nil {
            return err
        }
        ctx.Export("retention-ms", example.RetentionMs)
        return nil
    })
}
C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using ConfluentCloud = Pulumi.ConfluentCloud;

return await Deployment.RunAsync(() =>
{
    var example = ConfluentCloud.GetTableflowTopic.Invoke(new()
    {
        Environment = new ConfluentCloud.Inputs.GetTableflowTopicEnvironmentInputArgs
        {
            Id = staging.Id,
        },
        KafkaCluster = new ConfluentCloud.Inputs.GetTableflowTopicKafkaClusterInputArgs
        {
            Id = stagingConfluentKafkaCluster.Id,
        },
        DisplayName = "tableflow-example",
        Credentials = new ConfluentCloud.Inputs.GetTableflowTopicCredentialsInputArgs
        {
            Key = env_admin_tableflow_api_key.Id,
            Secret = env_admin_tableflow_api_key.Secret,
        },
    });

    return new Dictionary<string, object?>
    {
        ["retention-ms"] = example.Apply(getTableflowTopicResult => getTableflowTopicResult.RetentionMs),
    };
});
Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.confluentcloud.ConfluentcloudFunctions;
import com.pulumi.confluentcloud.inputs.GetTableflowTopicArgs;
import com.pulumi.confluentcloud.inputs.GetTableflowTopicEnvironmentArgs;
import com.pulumi.confluentcloud.inputs.GetTableflowTopicKafkaClusterArgs;
import com.pulumi.confluentcloud.inputs.GetTableflowTopicCredentialsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var example = ConfluentcloudFunctions.getTableflowTopic(GetTableflowTopicArgs.builder()
            .environment(GetTableflowTopicEnvironmentArgs.builder()
                .id(staging.id())
                .build())
            .kafkaCluster(GetTableflowTopicKafkaClusterArgs.builder()
                .id(stagingConfluentKafkaCluster.id())
                .build())
            .displayName("tableflow-example")
            .credentials(GetTableflowTopicCredentialsArgs.builder()
                .key(env_admin_tableflow_api_key.id())
                .secret(env_admin_tableflow_api_key.secret())
                .build())
            .build());

        ctx.export("retention-ms", example.retentionMs());
    }
}
YAML

variables:
  example:
    fn::invoke:
      function: confluentcloud:getTableflowTopic
      arguments:
        environment:
          id: ${staging.id}
        kafkaCluster:
          id: ${stagingConfluentKafkaCluster.id}
        displayName: tableflow-example
        credentials:
          key: ${["env-admin-tableflow-api-key"].id}
          secret: ${["env-admin-tableflow-api-key"].secret}
outputs:
  retention-ms: ${example.retentionMs}
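The examples above look up a single topic; to manage several Tableflow Topics in the same stack, the same lookup can simply be repeated per topic name. The TypeScript sketch below is a minimal illustration, not part of the provider documentation: the topic names are hypothetical, and staging, stagingConfluentKafkaCluster, and env_admin_tableflow_api_key are assumed to be resources defined elsewhere in the stack, as in the example above.

import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";

// Hypothetical list of Tableflow Topic names to read in this stack.
const topicNames = ["tableflow-example", "tableflow-orders"];

// One lookup per topic, using the output form so that outputs of the environment,
// cluster, and API key resources (assumed to exist elsewhere in this stack) can be
// passed in directly.
const retentionOutputs = topicNames.map(name =>
    confluentcloud.getTableflowTopicOutput({
        displayName: name,
        environment: { id: staging.id },
        kafkaCluster: { id: stagingConfluentKafkaCluster.id },
        credentials: {
            key: env_admin_tableflow_api_key.id,
            secret: env_admin_tableflow_api_key.secret,
        },
    }).retentionMs);

// Export a map of topic name -> retention in milliseconds.
export const retentionMsByTopic = pulumi.all(retentionOutputs).apply(values => {
    const byTopic: Record<string, string> = {};
    topicNames.forEach((name, i) => (byTopic[name] = values[i]));
    return byTopic;
});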
Option #2: Manage a single Tableflow Topic in the same Pulumi Stack
TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";

export = async () => {
    const example = await confluentcloud.getTableflowTopic({
        displayName: "tableflow-example",
    });
    return {
        "retention-ms": example.retentionMs,
    };
}
Python

import pulumi
import pulumi_confluentcloud as confluentcloud

example = confluentcloud.get_tableflow_topic(display_name="tableflow-example")
pulumi.export("retention-ms", example.retention_ms)
Go

package main

import (
    "github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        example, err := confluentcloud.LookupTableflowTopic(ctx, &confluentcloud.LookupTableflowTopicArgs{
            DisplayName: "tableflow-example",
        }, nil)
        if err != nil {
            return err
        }
        ctx.Export("retention-ms", example.RetentionMs)
        return nil
    })
}
C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using ConfluentCloud = Pulumi.ConfluentCloud;

return await Deployment.RunAsync(() =>
{
    var example = ConfluentCloud.GetTableflowTopic.Invoke(new()
    {
        DisplayName = "tableflow-example",
    });

    return new Dictionary<string, object?>
    {
        ["retention-ms"] = example.Apply(getTableflowTopicResult => getTableflowTopicResult.RetentionMs),
    };
});
Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.confluentcloud.ConfluentcloudFunctions;
import com.pulumi.confluentcloud.inputs.GetTableflowTopicArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var example = ConfluentcloudFunctions.getTableflowTopic(GetTableflowTopicArgs.builder()
            .displayName("tableflow-example")
            .build());

        ctx.export("retention-ms", example.retentionMs());
    }
}
YAML

variables:
  example:
    fn::invoke:
      function: confluentcloud:getTableflowTopic
      arguments:
        displayName: tableflow-example
outputs:
  retention-ms: ${example.retentionMs}
Using getTableflowTopic
Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.
function getTableflowTopic(args: GetTableflowTopicArgs, opts?: InvokeOptions): Promise<GetTableflowTopicResult>
function getTableflowTopicOutput(args: GetTableflowTopicOutputArgs, opts?: InvokeOptions): Output<GetTableflowTopicResult>

def get_tableflow_topic(credentials: Optional[GetTableflowTopicCredentials] = None,
                        display_name: Optional[str] = None,
                        environment: Optional[GetTableflowTopicEnvironment] = None,
                        kafka_cluster: Optional[GetTableflowTopicKafkaCluster] = None,
                        opts: Optional[InvokeOptions] = None) -> GetTableflowTopicResult
def get_tableflow_topic_output(credentials: Optional[pulumi.Input[GetTableflowTopicCredentialsArgs]] = None,
                               display_name: Optional[pulumi.Input[str]] = None,
                               environment: Optional[pulumi.Input[GetTableflowTopicEnvironmentArgs]] = None,
                               kafka_cluster: Optional[pulumi.Input[GetTableflowTopicKafkaClusterArgs]] = None,
                               opts: Optional[InvokeOptions] = None) -> Output[GetTableflowTopicResult]

func LookupTableflowTopic(ctx *Context, args *LookupTableflowTopicArgs, opts ...InvokeOption) (*LookupTableflowTopicResult, error)
func LookupTableflowTopicOutput(ctx *Context, args *LookupTableflowTopicOutputArgs, opts ...InvokeOption) LookupTableflowTopicResultOutput

Note: This function is named LookupTableflowTopic in the Go SDK.

public static class GetTableflowTopic
{
    public static Task<GetTableflowTopicResult> InvokeAsync(GetTableflowTopicArgs args, InvokeOptions? opts = null)
    public static Output<GetTableflowTopicResult> Invoke(GetTableflowTopicInvokeArgs args, InvokeOptions? opts = null)
}

public static CompletableFuture<GetTableflowTopicResult> getTableflowTopic(GetTableflowTopicArgs args, InvokeOptions options)
public static Output<GetTableflowTopicResult> getTableflowTopic(GetTableflowTopicArgs args, InvokeOptions options)
fn::invoke:
  function: confluentcloud:index/getTableflowTopic:getTableflowTopic
  arguments:
    # arguments dictionary
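As a concrete illustration of the two forms, the minimal TypeScript sketch below calls both getTableflowTopic (direct form) and getTableflowTopicOutput (output form) for the same topic name used in the examples above; it is a sketch only, and the exported names carry no special meaning.

import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";

export = async () => {
    // Direct form: plain arguments, Promise-wrapped result (await it in an async context).
    const direct = await confluentcloud.getTableflowTopic({
        displayName: "tableflow-example",
    });

    // Output form: Input-wrapped arguments, Output-wrapped result; convenient when an
    // argument comes from a resource that has not been created yet.
    const viaOutput = confluentcloud.getTableflowTopicOutput({
        displayName: pulumi.output("tableflow-example"),
    });

    return {
        "retention-ms-direct": direct.retentionMs,
        "retention-ms-output": viaOutput.retentionMs,
    };
};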
The following arguments are supported:

- displayName (String) - The name of the Tableflow Topic.
- environment (GetTableflowTopicEnvironment block) - See Supporting Types below.
- kafkaCluster (GetTableflowTopicKafkaCluster block) - See Supporting Types below.
- credentials (GetTableflowTopicCredentials block) - See Supporting Types below.

Argument names follow each SDK's conventions, for example displayName in TypeScript and Java, DisplayName in Go and C#, display_name in Python, and plain property maps in YAML.
getTableflowTopic Result
The following output properties are available:
Property names below follow the camelCase convention used by TypeScript and Java; Go and C# use PascalCase (for example RetentionMs), Python uses snake_case (retention_ms), and YAML uses property maps. List and block types map to each language accordingly.

- byobAws (List of GetTableflowTopicByobAw) - (Optional Configuration Block) Supports the attributes listed under GetTableflowTopicByobAw below.
- displayName (String)
- enableCompaction (Boolean) - (Optional Boolean) This flag determines whether to enable compaction for the Tableflow-enabled topic.
- enablePartitioning (Boolean) - (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow-enabled topic.
- environment (GetTableflowTopicEnvironment block)
- id (String) - The provider-assigned unique ID for this managed resource.
- kafkaCluster (GetTableflowTopicKafkaCluster block)
- managedStorages (List of GetTableflowTopicManagedStorage) - (Optional Configuration Block) The configuration of the Confluent managed bucket.
- recordFailureStrategy (String) - (Optional String) The strategy to handle record failures in the Tableflow-enabled topic during materialization. For SKIP, the bad records are skipped and processing moves to the next record. For SUSPEND, materialization of the topic is suspended.
- retentionMs (String) - (Optional String) The max age of snapshots (Iceberg) or versions (Delta) (snapshot/version expiration) to keep on the table, in milliseconds, for the Tableflow-enabled topic.
- suspended (Boolean) - (Optional Boolean) Indicates whether Tableflow should be suspended.
- tableFormats (List of String) - (Optional List) The supported table formats for the Tableflow-enabled topic.
- credentials (GetTableflowTopicCredentials block)
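Beyond retentionMs, the other output properties can be read the same way. The TypeScript sketch below is a minimal illustration (the topic name mirrors the examples above; property names follow the TypeScript convention):

import * as confluentcloud from "@pulumi/confluentcloud";

export = async () => {
    const example = await confluentcloud.getTableflowTopic({
        displayName: "tableflow-example",
    });

    // A few of the output properties documented above.
    return {
        "table-formats": example.tableFormats,
        "record-failure-strategy": example.recordFailureStrategy,
        "enable-compaction": example.enableCompaction,
        "suspended": example.suspended,
    };
};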
Supporting Types
GetTableflowTopicByobAw
- bucketName (String) - (Required String) The bucket name.
- bucketRegion (String) - (Required String) The bucket region.
- providerIntegrationId (String) - (Required String) The provider integration ID.
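For topics backed by a bring-your-own-bucket (BYOB) AWS configuration, these attributes surface through the byobAws list on the result. The TypeScript sketch below is a minimal illustration that assumes the looked-up topic actually uses BYOB AWS storage (otherwise the list is empty):

import * as confluentcloud from "@pulumi/confluentcloud";

export = async () => {
    const example = await confluentcloud.getTableflowTopic({
        displayName: "tableflow-example",
    });

    // byobAws is a list; guard against it being empty for topics that do not use
    // bring-your-own-bucket storage.
    const byob = example.byobAws?.[0];
    return {
        "byob-bucket-name": byob?.bucketName,
        "byob-bucket-region": byob?.bucketRegion,
        "byob-provider-integration-id": byob?.providerIntegrationId,
    };
};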
GetTableflowTopicCredentials

The credentials block accepts key and secret (the Tableflow API key and its secret), as shown in the credentials blocks in the examples above.
GetTableflowTopicEnvironment
- id (String) - The ID of the Environment, for example, env-abc123.
GetTableflowTopicKafkaCluster
- id (String) - The ID of the Kafka cluster, for example, lkc-abc123.
Package Details
- Repository: Confluent Cloud pulumi/pulumi-confluentcloud
- License: Apache-2.0
- Notes: This Pulumi package is based on the confluent Terraform Provider.