Schema URL

Type: io.kestra.core.models.flows.Flow

Definitions

com.fasterxml.jackson.databind.JsonNode object
com.google.cloud.bigquery.EncryptionConfiguration object
kmsKeyName string
com.google.cloud.bigquery.StandardTableDefinition-StreamingBuffer object
estimatedBytes integer
estimatedRows integer
oldestEntryTime integer
com.google.cloud.storage.Cors-Origin object
value string
com.surrealdb.connection.SurrealConnection object
com.theokanning.openai.completion.chat.ChatFunctionCall object
arguments object
name string
com.theokanning.openai.completion.chat.ChatMessage object
content string
function_call object
2 nested properties
arguments object
name string
name string
role string
com.unboundid.ldap.sdk.SearchScope object
name string
io.kestra.core.models.Label object
key string required
value string required
io.kestra.core.models.conditions.Condition io.kestra.plugin.core.condition.DateTimeBetweenCondition | io.kestra.plugin.core.condition.DayWeekCondition | io.kestra.plugin.core.condition.DayWeekInMonthCondition | io.kestra.plugin.core.condition.ExecutionFlowCondition | io.kestra.plugin.core.condition.ExecutionLabelsCondition | io.kestra.plugin.core.condition.ExecutionNamespaceCondition | io.kestra.plugin.core.condition.ExecutionOutputsCondition | io.kestra.plugin.core.condition.ExecutionStatusCondition | io.kestra.plugin.core.condition.ExpressionCondition | io.kestra.plugin.core.condition.FlowCondition | io.kestra.plugin.core.condition.FlowNamespaceCondition | io.kestra.plugin.core.condition.HasRetryAttemptCondition | io.kestra.plugin.core.condition.MultipleCondition | io.kestra.plugin.core.condition.NotCondition | io.kestra.plugin.core.condition.OrCondition | io.kestra.plugin.core.condition.PublicHolidayCondition | io.kestra.plugin.core.condition.TimeBetweenCondition | io.kestra.plugin.core.condition.WeekendCondition
io.kestra.core.models.executions.statistics.Flow object
flowId string required
namespace string required
io.kestra.core.models.flows.Concurrency object
limit integer required
exclusiveMin=0
behavior string

Default value is: QUEUE

Default: "QUEUE"
Values: "QUEUE" "CANCEL" "FAIL"
io.kestra.core.models.flows.DependsOn object
condition string
inputs string[]
io.kestra.core.models.flows.Flow object
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9._-]*
namespace string required
pattern=^[a-z0-9][a-z0-9._-]*
tasks io.kestra.plugin.core.debug.Echo | io.kestra.plugin.core.debug.Return | io.kestra.plugin.core.execution.Count | io.kestra.plugin.core.execution.Fail | io.kestra.plugin.core.execution.Labels | io.kestra.plugin.core.execution.PurgeExecutions | io.kestra.plugin.core.execution.Resume | io.kestra.plugin.core.flow.AllowFailure | io.kestra.plugin.core.flow.Dag | io.kestra.plugin.core.flow.EachParallel | io.kestra.plugin.core.flow.EachSequential | io.kestra.plugin.core.flow.ForEach | io.kestra.plugin.core.flow.ForEachItem | io.kestra.plugin.core.flow.If | io.kestra.plugin.core.flow.Parallel | io.kestra.plugin.core.flow.Pause | io.kestra.plugin.core.flow.Sequential | io.kestra.plugin.core.flow.Subflow | io.kestra.plugin.core.flow.Switch | io.kestra.plugin.core.flow.Template | io.kestra.plugin.core.flow.WaitFor | io.kestra.plugin.core.flow.WorkingDirectory | io.kestra.plugin.core.http.Download | io.kestra.plugin.core.http.Request | io.kestra.plugin.core.kv.Delete | io.kestra.plugin.core.kv.Get | io.kestra.plugin.core.kv.GetKeys | io.kestra.plugin.core.kv.Set | io.kestra.plugin.core.log.Fetch | io.kestra.plugin.core.log.Log | io.kestra.plugin.core.log.PurgeLogs | io.kestra.plugin.core.namespace.DeleteFiles | io.kestra.plugin.core.namespace.DownloadFiles | io.kestra.plugin.core.namespace.UploadFiles | io.kestra.plugin.core.output.OutputValues | io.kestra.plugin.core.state.Delete | io.kestra.plugin.core.state.Get | io.kestra.plugin.core.state.Set | io.kestra.plugin.core.storage.Concat | io.kestra.plugin.core.storage.DeduplicateItems | io.kestra.plugin.core.storage.Delete | io.kestra.plugin.core.storage.FilterItems | io.kestra.plugin.core.storage.LocalFiles | io.kestra.plugin.core.storage.PurgeCurrentExecutionFiles | io.kestra.plugin.core.storage.Reverse | io.kestra.plugin.core.storage.Size | io.kestra.plugin.core.storage.Split | io.kestra.plugin.core.templating.TemplatedTask | io.kestra.plugin.core.trigger.Toggle | io.kestra.core.tasks.scripts.Bash | 
io.kestra.plugin.scripts.shell.Commands | io.kestra.plugin.scripts.shell.Script | io.kestra.plugin.solace.Consume | io.kestra.plugin.solace.Produce | io.kestra.plugin.debezium.db2.Capture | io.kestra.plugin.scripts.jython.Eval | io.kestra.plugin.scripts.jython.FileTransform | io.kestra.plugin.git.Clone | io.kestra.plugin.git.Push | io.kestra.plugin.git.PushFlows | io.kestra.plugin.git.PushNamespaceFiles | io.kestra.plugin.git.Sync | io.kestra.plugin.git.SyncFlows | io.kestra.plugin.git.SyncNamespaceFiles | io.kestra.plugin.jdbc.sqlite.Query | io.kestra.plugin.debezium.mysql.Capture | io.kestra.plugin.jdbc.as400.Query | io.kestra.plugin.surrealdb.Query | io.kestra.plugin.ansible.cli.AnsibleCLI | io.kestra.plugin.jdbc.arrowflight.Query | io.kestra.plugin.serdes.avro.AvroToIon | io.kestra.plugin.serdes.avro.IonToAvro | io.kestra.plugin.serdes.csv.CsvToIon | io.kestra.plugin.serdes.csv.IonToCsv | io.kestra.plugin.serdes.excel.ExcelToIon | io.kestra.plugin.serdes.excel.IonToExcel | io.kestra.plugin.serdes.json.IonToJson | io.kestra.plugin.serdes.json.JsonToIon | io.kestra.plugin.serdes.parquet.IonToParquet | io.kestra.plugin.serdes.parquet.ParquetToIon | io.kestra.plugin.serdes.xml.IonToXml | io.kestra.plugin.serdes.xml.XmlToIon | io.kestra.plugin.debezium.mongodb.Capture | io.kestra.plugin.jdbc.vertica.Batch | io.kestra.plugin.jdbc.vertica.Query | io.kestra.plugin.jdbc.pinot.Query | io.kestra.plugin.neo4j.Batch | io.kestra.plugin.neo4j.Query | io.kestra.plugin.nats.Consume | io.kestra.plugin.nats.Produce | io.kestra.plugin.nats.kv.CreateBucket | io.kestra.plugin.nats.kv.Delete | io.kestra.plugin.nats.kv.Get | io.kestra.plugin.nats.kv.Put | io.kestra.plugin.pulsar.Consume | io.kestra.plugin.pulsar.Produce | io.kestra.plugin.pulsar.Reader | io.kestra.plugin.scripts.ruby.Commands | io.kestra.plugin.scripts.ruby.Script | io.kestra.plugin.notifications.discord.DiscordExecution | io.kestra.plugin.notifications.discord.DiscordIncomingWebhook | 
io.kestra.plugin.notifications.google.GoogleChatExecution | io.kestra.plugin.notifications.google.GoogleChatIncomingWebhook | io.kestra.plugin.notifications.mail.MailExecution | io.kestra.plugin.notifications.mail.MailSend | io.kestra.plugin.notifications.opsgenie.OpsgenieAlert | io.kestra.plugin.notifications.opsgenie.OpsgenieExecution | io.kestra.plugin.notifications.pagerduty.PagerDutyAlert | io.kestra.plugin.notifications.pagerduty.PagerDutyExecution | io.kestra.plugin.notifications.sendgrid.SendGridMailExecution | io.kestra.plugin.notifications.sendgrid.SendGridMailSend | io.kestra.plugin.notifications.sentry.SentryAlert | io.kestra.plugin.notifications.sentry.SentryExecution | io.kestra.plugin.notifications.slack.SlackExecution | io.kestra.plugin.notifications.slack.SlackIncomingWebhook | io.kestra.plugin.notifications.teams.TeamsExecution | io.kestra.plugin.notifications.teams.TeamsIncomingWebhook | io.kestra.plugin.notifications.telegram.TelegramExecution | io.kestra.plugin.notifications.telegram.TelegramSend | io.kestra.plugin.notifications.twilio.TwilioAlert | io.kestra.plugin.notifications.twilio.TwilioExecution | io.kestra.plugin.notifications.whatsapp.WhatsAppExecution | io.kestra.plugin.notifications.whatsapp.WhatsAppIncomingWebhook | io.kestra.plugin.notifications.zenduty.ZendutyAlert | io.kestra.plugin.notifications.zenduty.ZendutyExecution | io.kestra.plugin.notifications.zulip.ZulipExecution | io.kestra.plugin.notifications.zulip.ZulipIncomingWebhook | io.kestra.plugin.transform.grok.TransformItems | io.kestra.plugin.transform.grok.TransformValue | io.kestra.plugin.debezium.oracle.Capture | io.kestra.plugin.tika.Parse | io.kestra.plugin.jdbc.db2.Query | io.kestra.plugin.debezium.sqlserver.Capture | io.kestra.plugin.transform.jsonata.TransformItems | io.kestra.plugin.transform.jsonata.TransformValue | io.kestra.plugin.meilisearch.DocumentAdd | io.kestra.plugin.meilisearch.DocumentGet | io.kestra.plugin.meilisearch.FacetSearch | 
io.kestra.plugin.meilisearch.Search | io.kestra.plugin.jira.issues.Create | io.kestra.plugin.jira.issues.CreateComment | io.kestra.plugin.jira.issues.UpdateFields | io.kestra.plugin.singer.taps.BigQuery | io.kestra.plugin.singer.taps.BingAds | io.kestra.plugin.singer.taps.ChargeBee | io.kestra.plugin.singer.taps.ExchangeRateHost | io.kestra.plugin.singer.taps.FacebookAds | io.kestra.plugin.singer.taps.Fastly | io.kestra.plugin.singer.taps.GenericTap | io.kestra.plugin.singer.taps.GitHub | io.kestra.plugin.singer.taps.Gitlab | io.kestra.plugin.singer.taps.GoogleAdwords | io.kestra.plugin.singer.taps.GoogleAnalytics | io.kestra.plugin.singer.taps.GoogleSearchConsole | io.kestra.plugin.singer.taps.HubSpot | io.kestra.plugin.singer.taps.Marketo | io.kestra.plugin.singer.taps.Netsuite | io.kestra.plugin.singer.taps.PipelinewiseMongoDb | io.kestra.plugin.singer.taps.PipelinewiseMysql | io.kestra.plugin.singer.taps.PipelinewiseOracle | io.kestra.plugin.singer.taps.PipelinewisePostgres | io.kestra.plugin.singer.taps.PipelinewiseSqlServer | io.kestra.plugin.singer.taps.Quickbooks | io.kestra.plugin.singer.taps.Recharge | io.kestra.plugin.singer.taps.SageIntacct | io.kestra.plugin.singer.taps.Salesforce | io.kestra.plugin.singer.taps.Shopify | io.kestra.plugin.singer.taps.Slack | io.kestra.plugin.singer.taps.Stripe | io.kestra.plugin.singer.taps.Zendesk | io.kestra.plugin.singer.taps.Zoom | io.kestra.plugin.singer.targets.AdswerveBigQuery | io.kestra.plugin.singer.targets.Csv | io.kestra.plugin.singer.targets.DatamillCoPostgres | io.kestra.plugin.singer.targets.GenericTarget | io.kestra.plugin.singer.targets.Json | io.kestra.plugin.singer.targets.MeltanoSnowflake | io.kestra.plugin.singer.targets.Oracle | io.kestra.plugin.singer.targets.PipelinewisePostgres | io.kestra.plugin.singer.targets.PipelinewiseRedshift | io.kestra.plugin.singer.targets.PipelinewiseSnowflake | io.kestra.plugin.singer.targets.SqlServer | io.kestra.plugin.gcp.auth.OauthAccessToken | 
io.kestra.plugin.gcp.bigquery.Copy | io.kestra.plugin.gcp.bigquery.CopyPartitions | io.kestra.plugin.gcp.bigquery.CreateDataset | io.kestra.plugin.gcp.bigquery.CreateTable | io.kestra.plugin.gcp.bigquery.DeleteDataset | io.kestra.plugin.gcp.bigquery.DeletePartitions | io.kestra.plugin.gcp.bigquery.DeleteTable | io.kestra.plugin.gcp.bigquery.ExtractToGcs | io.kestra.plugin.gcp.bigquery.Load | io.kestra.plugin.gcp.bigquery.LoadFromGcs | io.kestra.plugin.gcp.bigquery.Query | io.kestra.plugin.gcp.bigquery.StorageWrite | io.kestra.plugin.gcp.bigquery.TableMetadata | io.kestra.plugin.gcp.bigquery.UpdateDataset | io.kestra.plugin.gcp.bigquery.UpdateTable | io.kestra.plugin.gcp.cli.GCloudCLI | io.kestra.plugin.gcp.dataproc.batches.PySparkSubmit | io.kestra.plugin.gcp.dataproc.batches.RSparkSubmit | io.kestra.plugin.gcp.dataproc.batches.SparkSqlSubmit | io.kestra.plugin.gcp.dataproc.batches.SparkSubmit | io.kestra.plugin.gcp.dataproc.clusters.Create | io.kestra.plugin.gcp.dataproc.clusters.Delete | io.kestra.plugin.gcp.firestore.Delete | io.kestra.plugin.gcp.firestore.Get | io.kestra.plugin.gcp.firestore.Query | io.kestra.plugin.gcp.firestore.Set | io.kestra.plugin.gcp.gcs.Compose | io.kestra.plugin.gcp.gcs.Copy | io.kestra.plugin.gcp.gcs.CreateBucket | io.kestra.plugin.gcp.gcs.CreateBucketIamPolicy | io.kestra.plugin.gcp.gcs.Delete | io.kestra.plugin.gcp.gcs.DeleteBucket | io.kestra.plugin.gcp.gcs.DeleteList | io.kestra.plugin.gcp.gcs.Download | io.kestra.plugin.gcp.gcs.Downloads | io.kestra.plugin.gcp.gcs.List | io.kestra.plugin.gcp.gcs.UpdateBucket | io.kestra.plugin.gcp.gcs.Upload | io.kestra.plugin.gcp.gke.ClusterMetadata | io.kestra.plugin.gcp.pubsub.Consume | io.kestra.plugin.gcp.pubsub.Publish | io.kestra.plugin.gcp.vertexai.ChatCompletion | io.kestra.plugin.gcp.vertexai.CustomJob | io.kestra.plugin.gcp.vertexai.MultimodalCompletion | io.kestra.plugin.gcp.vertexai.TextCompletion | io.kestra.plugin.hubspot.tickets.Create | io.kestra.plugin.terraform.cli.TerraformCLI 
| io.kestra.plugin.jdbc.duckdb.Query | io.kestra.plugin.powerbi.RefreshGroupDataset | io.kestra.plugin.minio.Copy | io.kestra.plugin.minio.CreateBucket | io.kestra.plugin.minio.Delete | io.kestra.plugin.minio.DeleteList | io.kestra.plugin.minio.Download | io.kestra.plugin.minio.Downloads | io.kestra.plugin.minio.List | io.kestra.plugin.minio.Upload | io.kestra.plugin.spark.JarSubmit | io.kestra.plugin.spark.PythonSubmit | io.kestra.plugin.spark.RSubmit | io.kestra.plugin.spark.SparkCLI | io.kestra.plugin.scripts.powershell.Commands | io.kestra.plugin.scripts.powershell.Script | io.kestra.plugin.jdbc.oracle.Batch | io.kestra.plugin.jdbc.oracle.Query | io.kestra.plugin.elasticsearch.Bulk | io.kestra.plugin.elasticsearch.Get | io.kestra.plugin.elasticsearch.Load | io.kestra.plugin.elasticsearch.Put | io.kestra.plugin.elasticsearch.Request | io.kestra.plugin.elasticsearch.Scroll | io.kestra.plugin.elasticsearch.Search | io.kestra.plugin.cloudquery.CloudQueryCLI | io.kestra.plugin.cloudquery.Sync | io.kestra.plugin.openai.ChatCompletion | io.kestra.plugin.openai.CreateImage | io.kestra.plugin.amqp.Consume | io.kestra.plugin.amqp.CreateQueue | io.kestra.plugin.amqp.DeclareExchange | io.kestra.plugin.amqp.Publish | io.kestra.plugin.amqp.QueueBind | io.kestra.plugin.scripts.r.Commands | io.kestra.plugin.scripts.r.Script | io.kestra.plugin.hightouch.Sync | io.kestra.plugin.ldap.Add | io.kestra.plugin.ldap.Delete | io.kestra.plugin.ldap.IonToLdif | io.kestra.plugin.ldap.LdifToIon | io.kestra.plugin.ldap.Modify | io.kestra.plugin.ldap.Search | io.kestra.plugin.cassandra.astradb.Query | io.kestra.plugin.cassandra.standard.Query | io.kestra.plugin.malloy.CLI | io.kestra.plugin.fivetran.connectors.Sync | io.kestra.plugin.mongodb.Bulk | io.kestra.plugin.mongodb.Delete | io.kestra.plugin.mongodb.Find | io.kestra.plugin.mongodb.InsertOne | io.kestra.plugin.mongodb.Load | io.kestra.plugin.mongodb.Update | io.kestra.plugin.redis.list.ListPop | io.kestra.plugin.redis.list.ListPush | 
io.kestra.plugin.redis.pubsub.Publish | io.kestra.plugin.redis.string.Delete | io.kestra.plugin.redis.string.Get | io.kestra.plugin.redis.string.Set | io.kestra.plugin.zendesk.tickets.Create | io.kestra.plugin.databricks.cluster.CreateCluster | io.kestra.plugin.databricks.cluster.DeleteCluster | io.kestra.plugin.databricks.dbfs.Download | io.kestra.plugin.databricks.dbfs.Upload | io.kestra.plugin.databricks.job.CreateJob | io.kestra.plugin.databricks.job.SubmitRun | io.kestra.plugin.databricks.sql.Query | io.kestra.plugin.linear.issues.Create | io.kestra.plugin.airbyte.cloud.jobs.Reset | io.kestra.plugin.airbyte.cloud.jobs.Sync | io.kestra.plugin.airbyte.connections.CheckStatus | io.kestra.plugin.airbyte.connections.Sync | io.kestra.plugin.jdbc.trino.Query | io.kestra.plugin.jdbc.sybase.Query | io.kestra.plugin.airflow.dags.TriggerDagRun | io.kestra.plugin.scripts.jbang.Commands | io.kestra.plugin.scripts.jbang.Script | io.kestra.plugin.scripts.groovy.Eval | io.kestra.plugin.scripts.groovy.FileTransform | io.kestra.plugin.dataform.cli.DataformCLI | io.kestra.plugin.jdbc.sqlserver.Batch | io.kestra.plugin.jdbc.sqlserver.Query | io.kestra.plugin.jdbc.clickhouse.BulkInsert | io.kestra.plugin.jdbc.clickhouse.ClickHouseLocalCLI | io.kestra.plugin.jdbc.clickhouse.Query | io.kestra.plugin.jdbc.druid.Query | io.kestra.plugin.github.code.Search | io.kestra.plugin.github.commits.Search | io.kestra.plugin.github.issues.Comment | io.kestra.plugin.github.issues.Create | io.kestra.plugin.github.issues.Search | io.kestra.plugin.github.pulls.Create | io.kestra.plugin.github.pulls.Search | io.kestra.plugin.github.repositories.Search | io.kestra.plugin.github.topics.Search | io.kestra.plugin.github.users.Search | io.kestra.plugin.soda.Scan | io.kestra.plugin.docker.Build | io.kestra.plugin.docker.Run | io.kestra.plugin.servicenow.Post | io.kestra.plugin.fs.ftp.Delete | io.kestra.plugin.fs.ftp.Download | io.kestra.plugin.fs.ftp.Downloads | io.kestra.plugin.fs.ftp.List | 
io.kestra.plugin.fs.ftp.Move | io.kestra.plugin.fs.ftp.Upload | io.kestra.plugin.fs.ftp.Uploads | io.kestra.plugin.fs.ftps.Delete | io.kestra.plugin.fs.ftps.Download | io.kestra.plugin.fs.ftps.Downloads | io.kestra.plugin.fs.ftps.List | io.kestra.plugin.fs.ftps.Move | io.kestra.plugin.fs.ftps.Upload | io.kestra.plugin.fs.ftps.Uploads | io.kestra.plugin.fs.sftp.Delete | io.kestra.plugin.fs.sftp.Download | io.kestra.plugin.fs.sftp.Downloads | io.kestra.plugin.fs.sftp.List | io.kestra.plugin.fs.sftp.Move | io.kestra.plugin.fs.sftp.Upload | io.kestra.plugin.fs.sftp.Uploads | io.kestra.plugin.fs.smb.Delete | io.kestra.plugin.fs.smb.Download | io.kestra.plugin.fs.smb.Downloads | io.kestra.plugin.fs.smb.List | io.kestra.plugin.fs.smb.Move | io.kestra.plugin.fs.smb.Upload | io.kestra.plugin.fs.smb.Uploads | io.kestra.plugin.fs.ssh.Command | io.kestra.plugin.crypto.openpgp.Decrypt | io.kestra.plugin.crypto.openpgp.Encrypt | io.kestra.plugin.weaviate.BatchCreate | io.kestra.plugin.weaviate.Delete | io.kestra.plugin.weaviate.Query | io.kestra.plugin.weaviate.SchemaCreate | io.kestra.core.tasks.scripts.Node | io.kestra.plugin.scripts.node.Commands | io.kestra.plugin.scripts.node.Script | io.kestra.plugin.jdbc.mysql.Batch | io.kestra.plugin.jdbc.mysql.Query | io.kestra.plugin.modal.cli.ModalCLI | io.kestra.plugin.jdbc.vectorwise.Batch | io.kestra.plugin.jdbc.vectorwise.Query | io.kestra.plugin.jdbc.redshift.Query | io.kestra.plugin.kubernetes.PodCreate | io.kestra.plugin.kubernetes.kubectl.Apply | io.kestra.plugin.jdbc.postgresql.Batch | io.kestra.plugin.jdbc.postgresql.CopyIn | io.kestra.plugin.jdbc.postgresql.CopyOut | io.kestra.plugin.jdbc.postgresql.Query | io.kestra.plugin.mqtt.Publish | io.kestra.plugin.mqtt.Subscribe | io.kestra.plugin.sqlmesh.cli.SQLMeshCLI | io.kestra.plugin.couchbase.Query | io.kestra.plugin.scripts.julia.Commands | io.kestra.plugin.scripts.julia.Script | io.kestra.plugin.jdbc.dremio.Query | io.kestra.plugin.googleworkspace.drive.Create | 
io.kestra.plugin.googleworkspace.drive.Delete | io.kestra.plugin.googleworkspace.drive.Download | io.kestra.plugin.googleworkspace.drive.Export | io.kestra.plugin.googleworkspace.drive.List | io.kestra.plugin.googleworkspace.drive.Upload | io.kestra.plugin.googleworkspace.sheets.CreateSpreadsheet | io.kestra.plugin.googleworkspace.sheets.DeleteSpreadsheet | io.kestra.plugin.googleworkspace.sheets.Load | io.kestra.plugin.googleworkspace.sheets.Read | io.kestra.plugin.googleworkspace.sheets.ReadRange | io.kestra.plugin.dbt.cli.Build | io.kestra.plugin.dbt.cli.Compile | io.kestra.plugin.dbt.cli.DbtCLI | io.kestra.plugin.dbt.cli.Deps | io.kestra.plugin.dbt.cli.Freshness | io.kestra.plugin.dbt.cli.List | io.kestra.plugin.dbt.cli.Run | io.kestra.plugin.dbt.cli.Seed | io.kestra.plugin.dbt.cli.Setup | io.kestra.plugin.dbt.cli.Snapshot | io.kestra.plugin.dbt.cli.Test | io.kestra.plugin.dbt.cloud.CheckStatus | io.kestra.plugin.dbt.cloud.TriggerRun | io.kestra.plugin.scripts.nashorn.Eval | io.kestra.plugin.scripts.nashorn.FileTransform | io.kestra.plugin.aws.athena.Query | io.kestra.plugin.aws.cli.AwsCLI | io.kestra.plugin.aws.dynamodb.DeleteItem | io.kestra.plugin.aws.dynamodb.GetItem | io.kestra.plugin.aws.dynamodb.PutItem | io.kestra.plugin.aws.dynamodb.Query | io.kestra.plugin.aws.dynamodb.Scan | io.kestra.plugin.aws.ecr.GetAuthToken | io.kestra.plugin.aws.eventbridge.PutEvents | io.kestra.plugin.aws.kinesis.PutRecords | io.kestra.plugin.aws.lambda.Invoke | io.kestra.plugin.aws.s3.Copy | io.kestra.plugin.aws.s3.CreateBucket | io.kestra.plugin.aws.s3.Delete | io.kestra.plugin.aws.s3.DeleteList | io.kestra.plugin.aws.s3.Download | io.kestra.plugin.aws.s3.Downloads | io.kestra.plugin.aws.s3.List | io.kestra.plugin.aws.s3.Upload | io.kestra.plugin.aws.sns.Publish | io.kestra.plugin.aws.sqs.Consume | io.kestra.plugin.aws.sqs.Publish | io.kestra.plugin.compress.ArchiveCompress | io.kestra.plugin.compress.ArchiveDecompress | io.kestra.plugin.compress.FileCompress | 
io.kestra.plugin.compress.FileDecompress | io.kestra.core.tasks.scripts.Python | io.kestra.plugin.scripts.python.Commands | io.kestra.plugin.scripts.python.Script | io.kestra.plugin.debezium.postgres.Capture | io.kestra.plugin.azure.batch.job.Create | io.kestra.plugin.azure.batch.pool.Resize | io.kestra.plugin.azure.cli.AzCLI | io.kestra.plugin.azure.datafactory.CreateRun | io.kestra.plugin.azure.eventhubs.Consume | io.kestra.plugin.azure.eventhubs.Produce | io.kestra.plugin.azure.storage.blob.Copy | io.kestra.plugin.azure.storage.blob.Delete | io.kestra.plugin.azure.storage.blob.DeleteList | io.kestra.plugin.azure.storage.blob.Download | io.kestra.plugin.azure.storage.blob.Downloads | io.kestra.plugin.azure.storage.blob.List | io.kestra.plugin.azure.storage.blob.SharedAccess | io.kestra.plugin.azure.storage.blob.Upload | io.kestra.plugin.azure.storage.table.Bulk | io.kestra.plugin.azure.storage.table.Delete | io.kestra.plugin.azure.storage.table.Get | io.kestra.plugin.azure.storage.table.List | io.kestra.plugin.kafka.Consume | io.kestra.plugin.kafka.Produce | io.kestra.plugin.jdbc.snowflake.Download | io.kestra.plugin.jdbc.snowflake.Query | io.kestra.plugin.jdbc.snowflake.Upload[] required
minItems=1
concurrency object
2 nested properties
limit integer required
exclusiveMin=0
behavior string

Default value is: QUEUE

Default: "QUEUE"
Values: "QUEUE" "CANCEL" "FAIL"
deleted boolean

Default value is: false

Default: false
description string
disabled boolean

Default value is: false

Default: false
errors io.kestra.plugin.core.debug.Echo | io.kestra.plugin.core.debug.Return | io.kestra.plugin.core.execution.Count | io.kestra.plugin.core.execution.Fail | io.kestra.plugin.core.execution.Labels | io.kestra.plugin.core.execution.PurgeExecutions | io.kestra.plugin.core.execution.Resume | io.kestra.plugin.core.flow.AllowFailure | io.kestra.plugin.core.flow.Dag | io.kestra.plugin.core.flow.EachParallel | io.kestra.plugin.core.flow.EachSequential | io.kestra.plugin.core.flow.ForEach | io.kestra.plugin.core.flow.ForEachItem | io.kestra.plugin.core.flow.If | io.kestra.plugin.core.flow.Parallel | io.kestra.plugin.core.flow.Pause | io.kestra.plugin.core.flow.Sequential | io.kestra.plugin.core.flow.Subflow | io.kestra.plugin.core.flow.Switch | io.kestra.plugin.core.flow.Template | io.kestra.plugin.core.flow.WaitFor | io.kestra.plugin.core.flow.WorkingDirectory | io.kestra.plugin.core.http.Download | io.kestra.plugin.core.http.Request | io.kestra.plugin.core.kv.Delete | io.kestra.plugin.core.kv.Get | io.kestra.plugin.core.kv.GetKeys | io.kestra.plugin.core.kv.Set | io.kestra.plugin.core.log.Fetch | io.kestra.plugin.core.log.Log | io.kestra.plugin.core.log.PurgeLogs | io.kestra.plugin.core.namespace.DeleteFiles | io.kestra.plugin.core.namespace.DownloadFiles | io.kestra.plugin.core.namespace.UploadFiles | io.kestra.plugin.core.output.OutputValues | io.kestra.plugin.core.state.Delete | io.kestra.plugin.core.state.Get | io.kestra.plugin.core.state.Set | io.kestra.plugin.core.storage.Concat | io.kestra.plugin.core.storage.DeduplicateItems | io.kestra.plugin.core.storage.Delete | io.kestra.plugin.core.storage.FilterItems | io.kestra.plugin.core.storage.LocalFiles | io.kestra.plugin.core.storage.PurgeCurrentExecutionFiles | io.kestra.plugin.core.storage.Reverse | io.kestra.plugin.core.storage.Size | io.kestra.plugin.core.storage.Split | io.kestra.plugin.core.templating.TemplatedTask | io.kestra.plugin.core.trigger.Toggle | io.kestra.core.tasks.scripts.Bash | 
io.kestra.plugin.scripts.shell.Commands | io.kestra.plugin.scripts.shell.Script | io.kestra.plugin.solace.Consume | io.kestra.plugin.solace.Produce | io.kestra.plugin.debezium.db2.Capture | io.kestra.plugin.scripts.jython.Eval | io.kestra.plugin.scripts.jython.FileTransform | io.kestra.plugin.git.Clone | io.kestra.plugin.git.Push | io.kestra.plugin.git.PushFlows | io.kestra.plugin.git.PushNamespaceFiles | io.kestra.plugin.git.Sync | io.kestra.plugin.git.SyncFlows | io.kestra.plugin.git.SyncNamespaceFiles | io.kestra.plugin.jdbc.sqlite.Query | io.kestra.plugin.debezium.mysql.Capture | io.kestra.plugin.jdbc.as400.Query | io.kestra.plugin.surrealdb.Query | io.kestra.plugin.ansible.cli.AnsibleCLI | io.kestra.plugin.jdbc.arrowflight.Query | io.kestra.plugin.serdes.avro.AvroToIon | io.kestra.plugin.serdes.avro.IonToAvro | io.kestra.plugin.serdes.csv.CsvToIon | io.kestra.plugin.serdes.csv.IonToCsv | io.kestra.plugin.serdes.excel.ExcelToIon | io.kestra.plugin.serdes.excel.IonToExcel | io.kestra.plugin.serdes.json.IonToJson | io.kestra.plugin.serdes.json.JsonToIon | io.kestra.plugin.serdes.parquet.IonToParquet | io.kestra.plugin.serdes.parquet.ParquetToIon | io.kestra.plugin.serdes.xml.IonToXml | io.kestra.plugin.serdes.xml.XmlToIon | io.kestra.plugin.debezium.mongodb.Capture | io.kestra.plugin.jdbc.vertica.Batch | io.kestra.plugin.jdbc.vertica.Query | io.kestra.plugin.jdbc.pinot.Query | io.kestra.plugin.neo4j.Batch | io.kestra.plugin.neo4j.Query | io.kestra.plugin.nats.Consume | io.kestra.plugin.nats.Produce | io.kestra.plugin.nats.kv.CreateBucket | io.kestra.plugin.nats.kv.Delete | io.kestra.plugin.nats.kv.Get | io.kestra.plugin.nats.kv.Put | io.kestra.plugin.pulsar.Consume | io.kestra.plugin.pulsar.Produce | io.kestra.plugin.pulsar.Reader | io.kestra.plugin.scripts.ruby.Commands | io.kestra.plugin.scripts.ruby.Script | io.kestra.plugin.notifications.discord.DiscordExecution | io.kestra.plugin.notifications.discord.DiscordIncomingWebhook | 
io.kestra.plugin.notifications.google.GoogleChatExecution | io.kestra.plugin.notifications.google.GoogleChatIncomingWebhook | io.kestra.plugin.notifications.mail.MailExecution | io.kestra.plugin.notifications.mail.MailSend | io.kestra.plugin.notifications.opsgenie.OpsgenieAlert | io.kestra.plugin.notifications.opsgenie.OpsgenieExecution | io.kestra.plugin.notifications.pagerduty.PagerDutyAlert | io.kestra.plugin.notifications.pagerduty.PagerDutyExecution | io.kestra.plugin.notifications.sendgrid.SendGridMailExecution | io.kestra.plugin.notifications.sendgrid.SendGridMailSend | io.kestra.plugin.notifications.sentry.SentryAlert | io.kestra.plugin.notifications.sentry.SentryExecution | io.kestra.plugin.notifications.slack.SlackExecution | io.kestra.plugin.notifications.slack.SlackIncomingWebhook | io.kestra.plugin.notifications.teams.TeamsExecution | io.kestra.plugin.notifications.teams.TeamsIncomingWebhook | io.kestra.plugin.notifications.telegram.TelegramExecution | io.kestra.plugin.notifications.telegram.TelegramSend | io.kestra.plugin.notifications.twilio.TwilioAlert | io.kestra.plugin.notifications.twilio.TwilioExecution | io.kestra.plugin.notifications.whatsapp.WhatsAppExecution | io.kestra.plugin.notifications.whatsapp.WhatsAppIncomingWebhook | io.kestra.plugin.notifications.zenduty.ZendutyAlert | io.kestra.plugin.notifications.zenduty.ZendutyExecution | io.kestra.plugin.notifications.zulip.ZulipExecution | io.kestra.plugin.notifications.zulip.ZulipIncomingWebhook | io.kestra.plugin.transform.grok.TransformItems | io.kestra.plugin.transform.grok.TransformValue | io.kestra.plugin.debezium.oracle.Capture | io.kestra.plugin.tika.Parse | io.kestra.plugin.jdbc.db2.Query | io.kestra.plugin.debezium.sqlserver.Capture | io.kestra.plugin.transform.jsonata.TransformItems | io.kestra.plugin.transform.jsonata.TransformValue | io.kestra.plugin.meilisearch.DocumentAdd | io.kestra.plugin.meilisearch.DocumentGet | io.kestra.plugin.meilisearch.FacetSearch | 
io.kestra.plugin.meilisearch.Search | io.kestra.plugin.jira.issues.Create | io.kestra.plugin.jira.issues.CreateComment | io.kestra.plugin.jira.issues.UpdateFields | io.kestra.plugin.singer.taps.BigQuery | io.kestra.plugin.singer.taps.BingAds | io.kestra.plugin.singer.taps.ChargeBee | io.kestra.plugin.singer.taps.ExchangeRateHost | io.kestra.plugin.singer.taps.FacebookAds | io.kestra.plugin.singer.taps.Fastly | io.kestra.plugin.singer.taps.GenericTap | io.kestra.plugin.singer.taps.GitHub | io.kestra.plugin.singer.taps.Gitlab | io.kestra.plugin.singer.taps.GoogleAdwords | io.kestra.plugin.singer.taps.GoogleAnalytics | io.kestra.plugin.singer.taps.GoogleSearchConsole | io.kestra.plugin.singer.taps.HubSpot | io.kestra.plugin.singer.taps.Marketo | io.kestra.plugin.singer.taps.Netsuite | io.kestra.plugin.singer.taps.PipelinewiseMongoDb | io.kestra.plugin.singer.taps.PipelinewiseMysql | io.kestra.plugin.singer.taps.PipelinewiseOracle | io.kestra.plugin.singer.taps.PipelinewisePostgres | io.kestra.plugin.singer.taps.PipelinewiseSqlServer | io.kestra.plugin.singer.taps.Quickbooks | io.kestra.plugin.singer.taps.Recharge | io.kestra.plugin.singer.taps.SageIntacct | io.kestra.plugin.singer.taps.Salesforce | io.kestra.plugin.singer.taps.Shopify | io.kestra.plugin.singer.taps.Slack | io.kestra.plugin.singer.taps.Stripe | io.kestra.plugin.singer.taps.Zendesk | io.kestra.plugin.singer.taps.Zoom | io.kestra.plugin.singer.targets.AdswerveBigQuery | io.kestra.plugin.singer.targets.Csv | io.kestra.plugin.singer.targets.DatamillCoPostgres | io.kestra.plugin.singer.targets.GenericTarget | io.kestra.plugin.singer.targets.Json | io.kestra.plugin.singer.targets.MeltanoSnowflake | io.kestra.plugin.singer.targets.Oracle | io.kestra.plugin.singer.targets.PipelinewisePostgres | io.kestra.plugin.singer.targets.PipelinewiseRedshift | io.kestra.plugin.singer.targets.PipelinewiseSnowflake | io.kestra.plugin.singer.targets.SqlServer | io.kestra.plugin.gcp.auth.OauthAccessToken | 
io.kestra.plugin.gcp.bigquery.Copy | io.kestra.plugin.gcp.bigquery.CopyPartitions | io.kestra.plugin.gcp.bigquery.CreateDataset | io.kestra.plugin.gcp.bigquery.CreateTable | io.kestra.plugin.gcp.bigquery.DeleteDataset | io.kestra.plugin.gcp.bigquery.DeletePartitions | io.kestra.plugin.gcp.bigquery.DeleteTable | io.kestra.plugin.gcp.bigquery.ExtractToGcs | io.kestra.plugin.gcp.bigquery.Load | io.kestra.plugin.gcp.bigquery.LoadFromGcs | io.kestra.plugin.gcp.bigquery.Query | io.kestra.plugin.gcp.bigquery.StorageWrite | io.kestra.plugin.gcp.bigquery.TableMetadata | io.kestra.plugin.gcp.bigquery.UpdateDataset | io.kestra.plugin.gcp.bigquery.UpdateTable | io.kestra.plugin.gcp.cli.GCloudCLI | io.kestra.plugin.gcp.dataproc.batches.PySparkSubmit | io.kestra.plugin.gcp.dataproc.batches.RSparkSubmit | io.kestra.plugin.gcp.dataproc.batches.SparkSqlSubmit | io.kestra.plugin.gcp.dataproc.batches.SparkSubmit | io.kestra.plugin.gcp.dataproc.clusters.Create | io.kestra.plugin.gcp.dataproc.clusters.Delete | io.kestra.plugin.gcp.firestore.Delete | io.kestra.plugin.gcp.firestore.Get | io.kestra.plugin.gcp.firestore.Query | io.kestra.plugin.gcp.firestore.Set | io.kestra.plugin.gcp.gcs.Compose | io.kestra.plugin.gcp.gcs.Copy | io.kestra.plugin.gcp.gcs.CreateBucket | io.kestra.plugin.gcp.gcs.CreateBucketIamPolicy | io.kestra.plugin.gcp.gcs.Delete | io.kestra.plugin.gcp.gcs.DeleteBucket | io.kestra.plugin.gcp.gcs.DeleteList | io.kestra.plugin.gcp.gcs.Download | io.kestra.plugin.gcp.gcs.Downloads | io.kestra.plugin.gcp.gcs.List | io.kestra.plugin.gcp.gcs.UpdateBucket | io.kestra.plugin.gcp.gcs.Upload | io.kestra.plugin.gcp.gke.ClusterMetadata | io.kestra.plugin.gcp.pubsub.Consume | io.kestra.plugin.gcp.pubsub.Publish | io.kestra.plugin.gcp.vertexai.ChatCompletion | io.kestra.plugin.gcp.vertexai.CustomJob | io.kestra.plugin.gcp.vertexai.MultimodalCompletion | io.kestra.plugin.gcp.vertexai.TextCompletion | io.kestra.plugin.hubspot.tickets.Create | io.kestra.plugin.terraform.cli.TerraformCLI 
| io.kestra.plugin.jdbc.duckdb.Query | io.kestra.plugin.powerbi.RefreshGroupDataset | io.kestra.plugin.minio.Copy | io.kestra.plugin.minio.CreateBucket | io.kestra.plugin.minio.Delete | io.kestra.plugin.minio.DeleteList | io.kestra.plugin.minio.Download | io.kestra.plugin.minio.Downloads | io.kestra.plugin.minio.List | io.kestra.plugin.minio.Upload | io.kestra.plugin.spark.JarSubmit | io.kestra.plugin.spark.PythonSubmit | io.kestra.plugin.spark.RSubmit | io.kestra.plugin.spark.SparkCLI | io.kestra.plugin.scripts.powershell.Commands | io.kestra.plugin.scripts.powershell.Script | io.kestra.plugin.jdbc.oracle.Batch | io.kestra.plugin.jdbc.oracle.Query | io.kestra.plugin.elasticsearch.Bulk | io.kestra.plugin.elasticsearch.Get | io.kestra.plugin.elasticsearch.Load | io.kestra.plugin.elasticsearch.Put | io.kestra.plugin.elasticsearch.Request | io.kestra.plugin.elasticsearch.Scroll | io.kestra.plugin.elasticsearch.Search | io.kestra.plugin.cloudquery.CloudQueryCLI | io.kestra.plugin.cloudquery.Sync | io.kestra.plugin.openai.ChatCompletion | io.kestra.plugin.openai.CreateImage | io.kestra.plugin.amqp.Consume | io.kestra.plugin.amqp.CreateQueue | io.kestra.plugin.amqp.DeclareExchange | io.kestra.plugin.amqp.Publish | io.kestra.plugin.amqp.QueueBind | io.kestra.plugin.scripts.r.Commands | io.kestra.plugin.scripts.r.Script | io.kestra.plugin.hightouch.Sync | io.kestra.plugin.ldap.Add | io.kestra.plugin.ldap.Delete | io.kestra.plugin.ldap.IonToLdif | io.kestra.plugin.ldap.LdifToIon | io.kestra.plugin.ldap.Modify | io.kestra.plugin.ldap.Search | io.kestra.plugin.cassandra.astradb.Query | io.kestra.plugin.cassandra.standard.Query | io.kestra.plugin.malloy.CLI | io.kestra.plugin.fivetran.connectors.Sync | io.kestra.plugin.mongodb.Bulk | io.kestra.plugin.mongodb.Delete | io.kestra.plugin.mongodb.Find | io.kestra.plugin.mongodb.InsertOne | io.kestra.plugin.mongodb.Load | io.kestra.plugin.mongodb.Update | io.kestra.plugin.redis.list.ListPop | io.kestra.plugin.redis.list.ListPush | 
io.kestra.plugin.redis.pubsub.Publish | io.kestra.plugin.redis.string.Delete | io.kestra.plugin.redis.string.Get | io.kestra.plugin.redis.string.Set | io.kestra.plugin.zendesk.tickets.Create | io.kestra.plugin.databricks.cluster.CreateCluster | io.kestra.plugin.databricks.cluster.DeleteCluster | io.kestra.plugin.databricks.dbfs.Download | io.kestra.plugin.databricks.dbfs.Upload | io.kestra.plugin.databricks.job.CreateJob | io.kestra.plugin.databricks.job.SubmitRun | io.kestra.plugin.databricks.sql.Query | io.kestra.plugin.linear.issues.Create | io.kestra.plugin.airbyte.cloud.jobs.Reset | io.kestra.plugin.airbyte.cloud.jobs.Sync | io.kestra.plugin.airbyte.connections.CheckStatus | io.kestra.plugin.airbyte.connections.Sync | io.kestra.plugin.jdbc.trino.Query | io.kestra.plugin.jdbc.sybase.Query | io.kestra.plugin.airflow.dags.TriggerDagRun | io.kestra.plugin.scripts.jbang.Commands | io.kestra.plugin.scripts.jbang.Script | io.kestra.plugin.scripts.groovy.Eval | io.kestra.plugin.scripts.groovy.FileTransform | io.kestra.plugin.dataform.cli.DataformCLI | io.kestra.plugin.jdbc.sqlserver.Batch | io.kestra.plugin.jdbc.sqlserver.Query | io.kestra.plugin.jdbc.clickhouse.BulkInsert | io.kestra.plugin.jdbc.clickhouse.ClickHouseLocalCLI | io.kestra.plugin.jdbc.clickhouse.Query | io.kestra.plugin.jdbc.druid.Query | io.kestra.plugin.github.code.Search | io.kestra.plugin.github.commits.Search | io.kestra.plugin.github.issues.Comment | io.kestra.plugin.github.issues.Create | io.kestra.plugin.github.issues.Search | io.kestra.plugin.github.pulls.Create | io.kestra.plugin.github.pulls.Search | io.kestra.plugin.github.repositories.Search | io.kestra.plugin.github.topics.Search | io.kestra.plugin.github.users.Search | io.kestra.plugin.soda.Scan | io.kestra.plugin.docker.Build | io.kestra.plugin.docker.Run | io.kestra.plugin.servicenow.Post | io.kestra.plugin.fs.ftp.Delete | io.kestra.plugin.fs.ftp.Download | io.kestra.plugin.fs.ftp.Downloads | io.kestra.plugin.fs.ftp.List | 
io.kestra.plugin.fs.ftp.Move | io.kestra.plugin.fs.ftp.Upload | io.kestra.plugin.fs.ftp.Uploads | io.kestra.plugin.fs.ftps.Delete | io.kestra.plugin.fs.ftps.Download | io.kestra.plugin.fs.ftps.Downloads | io.kestra.plugin.fs.ftps.List | io.kestra.plugin.fs.ftps.Move | io.kestra.plugin.fs.ftps.Upload | io.kestra.plugin.fs.ftps.Uploads | io.kestra.plugin.fs.sftp.Delete | io.kestra.plugin.fs.sftp.Download | io.kestra.plugin.fs.sftp.Downloads | io.kestra.plugin.fs.sftp.List | io.kestra.plugin.fs.sftp.Move | io.kestra.plugin.fs.sftp.Upload | io.kestra.plugin.fs.sftp.Uploads | io.kestra.plugin.fs.smb.Delete | io.kestra.plugin.fs.smb.Download | io.kestra.plugin.fs.smb.Downloads | io.kestra.plugin.fs.smb.List | io.kestra.plugin.fs.smb.Move | io.kestra.plugin.fs.smb.Upload | io.kestra.plugin.fs.smb.Uploads | io.kestra.plugin.fs.ssh.Command | io.kestra.plugin.crypto.openpgp.Decrypt | io.kestra.plugin.crypto.openpgp.Encrypt | io.kestra.plugin.weaviate.BatchCreate | io.kestra.plugin.weaviate.Delete | io.kestra.plugin.weaviate.Query | io.kestra.plugin.weaviate.SchemaCreate | io.kestra.core.tasks.scripts.Node | io.kestra.plugin.scripts.node.Commands | io.kestra.plugin.scripts.node.Script | io.kestra.plugin.jdbc.mysql.Batch | io.kestra.plugin.jdbc.mysql.Query | io.kestra.plugin.modal.cli.ModalCLI | io.kestra.plugin.jdbc.vectorwise.Batch | io.kestra.plugin.jdbc.vectorwise.Query | io.kestra.plugin.jdbc.redshift.Query | io.kestra.plugin.kubernetes.PodCreate | io.kestra.plugin.kubernetes.kubectl.Apply | io.kestra.plugin.jdbc.postgresql.Batch | io.kestra.plugin.jdbc.postgresql.CopyIn | io.kestra.plugin.jdbc.postgresql.CopyOut | io.kestra.plugin.jdbc.postgresql.Query | io.kestra.plugin.mqtt.Publish | io.kestra.plugin.mqtt.Subscribe | io.kestra.plugin.sqlmesh.cli.SQLMeshCLI | io.kestra.plugin.couchbase.Query | io.kestra.plugin.scripts.julia.Commands | io.kestra.plugin.scripts.julia.Script | io.kestra.plugin.jdbc.dremio.Query | io.kestra.plugin.googleworkspace.drive.Create | 
io.kestra.plugin.googleworkspace.drive.Delete | io.kestra.plugin.googleworkspace.drive.Download | io.kestra.plugin.googleworkspace.drive.Export | io.kestra.plugin.googleworkspace.drive.List | io.kestra.plugin.googleworkspace.drive.Upload | io.kestra.plugin.googleworkspace.sheets.CreateSpreadsheet | io.kestra.plugin.googleworkspace.sheets.DeleteSpreadsheet | io.kestra.plugin.googleworkspace.sheets.Load | io.kestra.plugin.googleworkspace.sheets.Read | io.kestra.plugin.googleworkspace.sheets.ReadRange | io.kestra.plugin.dbt.cli.Build | io.kestra.plugin.dbt.cli.Compile | io.kestra.plugin.dbt.cli.DbtCLI | io.kestra.plugin.dbt.cli.Deps | io.kestra.plugin.dbt.cli.Freshness | io.kestra.plugin.dbt.cli.List | io.kestra.plugin.dbt.cli.Run | io.kestra.plugin.dbt.cli.Seed | io.kestra.plugin.dbt.cli.Setup | io.kestra.plugin.dbt.cli.Snapshot | io.kestra.plugin.dbt.cli.Test | io.kestra.plugin.dbt.cloud.CheckStatus | io.kestra.plugin.dbt.cloud.TriggerRun | io.kestra.plugin.scripts.nashorn.Eval | io.kestra.plugin.scripts.nashorn.FileTransform | io.kestra.plugin.aws.athena.Query | io.kestra.plugin.aws.cli.AwsCLI | io.kestra.plugin.aws.dynamodb.DeleteItem | io.kestra.plugin.aws.dynamodb.GetItem | io.kestra.plugin.aws.dynamodb.PutItem | io.kestra.plugin.aws.dynamodb.Query | io.kestra.plugin.aws.dynamodb.Scan | io.kestra.plugin.aws.ecr.GetAuthToken | io.kestra.plugin.aws.eventbridge.PutEvents | io.kestra.plugin.aws.kinesis.PutRecords | io.kestra.plugin.aws.lambda.Invoke | io.kestra.plugin.aws.s3.Copy | io.kestra.plugin.aws.s3.CreateBucket | io.kestra.plugin.aws.s3.Delete | io.kestra.plugin.aws.s3.DeleteList | io.kestra.plugin.aws.s3.Download | io.kestra.plugin.aws.s3.Downloads | io.kestra.plugin.aws.s3.List | io.kestra.plugin.aws.s3.Upload | io.kestra.plugin.aws.sns.Publish | io.kestra.plugin.aws.sqs.Consume | io.kestra.plugin.aws.sqs.Publish | io.kestra.plugin.compress.ArchiveCompress | io.kestra.plugin.compress.ArchiveDecompress | io.kestra.plugin.compress.FileCompress | 
io.kestra.plugin.compress.FileDecompress | io.kestra.core.tasks.scripts.Python | io.kestra.plugin.scripts.python.Commands | io.kestra.plugin.scripts.python.Script | io.kestra.plugin.debezium.postgres.Capture | io.kestra.plugin.azure.batch.job.Create | io.kestra.plugin.azure.batch.pool.Resize | io.kestra.plugin.azure.cli.AzCLI | io.kestra.plugin.azure.datafactory.CreateRun | io.kestra.plugin.azure.eventhubs.Consume | io.kestra.plugin.azure.eventhubs.Produce | io.kestra.plugin.azure.storage.blob.Copy | io.kestra.plugin.azure.storage.blob.Delete | io.kestra.plugin.azure.storage.blob.DeleteList | io.kestra.plugin.azure.storage.blob.Download | io.kestra.plugin.azure.storage.blob.Downloads | io.kestra.plugin.azure.storage.blob.List | io.kestra.plugin.azure.storage.blob.SharedAccess | io.kestra.plugin.azure.storage.blob.Upload | io.kestra.plugin.azure.storage.table.Bulk | io.kestra.plugin.azure.storage.table.Delete | io.kestra.plugin.azure.storage.table.Get | io.kestra.plugin.azure.storage.table.List | io.kestra.plugin.kafka.Consume | io.kestra.plugin.kafka.Produce | io.kestra.plugin.jdbc.snowflake.Download | io.kestra.plugin.jdbc.snowflake.Query | io.kestra.plugin.jdbc.snowflake.Upload[]
inputs io.kestra.core.models.flows.input.ArrayInput-2 | io.kestra.core.models.flows.input.BooleanInput-2 | io.kestra.core.models.flows.input.DateInput-2 | io.kestra.core.models.flows.input.DateTimeInput-2 | io.kestra.core.models.flows.input.DurationInput-2 | io.kestra.core.models.flows.input.FileInput-2 | io.kestra.core.models.flows.input.FloatInput-2 | io.kestra.core.models.flows.input.IntInput-2 | io.kestra.core.models.flows.input.JsonInput-2 | io.kestra.core.models.flows.input.SecretInput-2 | io.kestra.core.models.flows.input.StringInput-2 | io.kestra.core.models.flows.input.EnumInput-2 | io.kestra.core.models.flows.input.SelectInput-2 | io.kestra.core.models.flows.input.TimeInput-2 | io.kestra.core.models.flows.input.URIInput-2 | io.kestra.core.models.flows.input.MultiselectInput-2 | io.kestra.core.models.flows.input.YamlInput-2[]
labels array | object
listeners array
outputs array

Output values make information about the execution of your Flow available and expose it for other Kestra flows to use. Output values are similar to return values in programming languages.

revision integer
min=1
taskDefaults array
tenantId string
pattern=^[a-z0-9][a-z0-9_-]*
triggers io.kestra.plugin.core.http.Trigger | io.kestra.plugin.core.trigger.Flow | io.kestra.plugin.core.trigger.Schedule | io.kestra.plugin.core.trigger.ScheduleOnDates | io.kestra.plugin.core.trigger.Webhook | io.kestra.plugin.solace.Trigger | io.kestra.plugin.debezium.db2.RealtimeTrigger | io.kestra.plugin.debezium.db2.Trigger | io.kestra.plugin.jdbc.sqlite.Trigger | io.kestra.plugin.debezium.mysql.RealtimeTrigger | io.kestra.plugin.debezium.mysql.Trigger | io.kestra.plugin.jdbc.as400.Trigger | io.kestra.plugin.surrealdb.Trigger | io.kestra.plugin.jdbc.arrowflight.Trigger | io.kestra.plugin.debezium.mongodb.RealtimeTrigger | io.kestra.plugin.debezium.mongodb.Trigger | io.kestra.plugin.jdbc.vertica.Trigger | io.kestra.plugin.jdbc.pinot.Trigger | io.kestra.plugin.nats.RealtimeTrigger | io.kestra.plugin.nats.Trigger | io.kestra.plugin.pulsar.RealtimeTrigger | io.kestra.plugin.pulsar.Trigger | io.kestra.plugin.debezium.oracle.RealtimeTrigger | io.kestra.plugin.debezium.oracle.Trigger | io.kestra.plugin.jdbc.db2.Trigger | io.kestra.plugin.debezium.sqlserver.RealtimeTrigger | io.kestra.plugin.debezium.sqlserver.Trigger | io.kestra.plugin.gcp.bigquery.Trigger | io.kestra.plugin.gcp.gcs.Trigger | io.kestra.plugin.gcp.pubsub.RealtimeTrigger | io.kestra.plugin.gcp.pubsub.Trigger | io.kestra.plugin.jdbc.duckdb.Trigger | io.kestra.plugin.minio.Trigger | io.kestra.plugin.jdbc.oracle.Trigger | io.kestra.plugin.amqp.RealtimeTrigger | io.kestra.plugin.amqp.Trigger | io.kestra.plugin.cassandra.astradb.Trigger | io.kestra.plugin.cassandra.standard.Trigger | io.kestra.plugin.mongodb.Trigger | io.kestra.plugin.redis.list.RealtimeTrigger | io.kestra.plugin.redis.list.Trigger | io.kestra.plugin.jdbc.trino.Trigger | io.kestra.plugin.jdbc.sybase.Trigger | io.kestra.plugin.jdbc.sqlserver.Trigger | io.kestra.plugin.jdbc.clickhouse.Trigger | io.kestra.plugin.jdbc.druid.Trigger | io.kestra.plugin.fs.ftp.Trigger | io.kestra.plugin.fs.ftps.Trigger | io.kestra.plugin.fs.sftp.Trigger | 
io.kestra.plugin.fs.smb.Trigger | io.kestra.plugin.jdbc.mysql.Trigger | io.kestra.plugin.jdbc.vectorwise.Trigger | io.kestra.plugin.jdbc.redshift.Trigger | io.kestra.plugin.jdbc.postgresql.Trigger | io.kestra.plugin.mqtt.RealtimeTrigger | io.kestra.plugin.mqtt.Trigger | io.kestra.plugin.couchbase.Trigger | io.kestra.plugin.jdbc.dremio.Trigger | io.kestra.plugin.aws.s3.Trigger | io.kestra.plugin.aws.sqs.RealtimeTrigger | io.kestra.plugin.aws.sqs.Trigger | io.kestra.plugin.debezium.postgres.RealtimeTrigger | io.kestra.plugin.debezium.postgres.Trigger | io.kestra.plugin.azure.eventhubs.RealtimeTrigger | io.kestra.plugin.azure.eventhubs.Trigger | io.kestra.plugin.azure.storage.blob.Trigger | io.kestra.plugin.kafka.RealtimeTrigger | io.kestra.plugin.kafka.Trigger | io.kestra.plugin.jdbc.snowflake.Trigger[]
variables object
io.kestra.core.models.flows.Output object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
value required
description string
io.kestra.core.models.flows.PluginDefault object
type string required
forced boolean

Default value is : false

Default: false
values object
io.kestra.core.models.flows.input.ArrayInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
itemType string required

Cannot be of type ARRAY.

Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
name string
required boolean

Default value is : true

Default: true
io.kestra.core.models.flows.input.ArrayInput-2
io.kestra.core.models.flows.input.BooleanInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
name string
required boolean

Default value is : true

Default: true
io.kestra.core.models.flows.input.BooleanInput-2
io.kestra.core.models.flows.input.DateInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
after string
format=date
before string
format=date
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
name string
required boolean

Default value is : true

Default: true
io.kestra.core.models.flows.input.DateInput-2
io.kestra.core.models.flows.input.DateTimeInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
after string
format=date-time
before string
format=date-time
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
name string
required boolean

Default value is : true

Default: true
io.kestra.core.models.flows.input.DateTimeInput-2
io.kestra.core.models.flows.input.DurationInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
max string
format=duration
min string
format=duration
name string
required boolean

Default value is : true

Default: true
io.kestra.core.models.flows.input.DurationInput-2
io.kestra.core.models.flows.input.EnumInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
values string[] required

DEPRECATED; use 'SELECT' instead.

defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
name string
required boolean

Default value is : true

Default: true
io.kestra.core.models.flows.input.EnumInput-2
io.kestra.core.models.flows.input.FileInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
extension string

Default value is : .upl

Default: ".upl"
name string
required boolean

Default value is : true

Default: true
io.kestra.core.models.flows.input.FileInput-2
io.kestra.core.models.flows.input.FloatInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
max number
min number
name string
required boolean

Default value is : true

Default: true
io.kestra.core.models.flows.input.FloatInput-2
io.kestra.core.models.flows.input.IntInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
max integer
min integer
name string
required boolean

Default value is : true

Default: true
io.kestra.core.models.flows.input.IntInput-2
io.kestra.core.models.flows.input.JsonInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
name string
required boolean

Default value is : true

Default: true
io.kestra.core.models.flows.input.JsonInput-2
io.kestra.core.models.flows.input.MultiselectInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
allowCustomValue boolean

Default value is : false

Default: false
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
expression string
itemType string

Cannot be of type ARRAY nor 'MULTISELECT'.

Default value is : STRING

Default: "STRING"
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
name string
options string[]
required boolean

Default value is : true

Default: true
values string[]
io.kestra.core.models.flows.input.MultiselectInput-2
io.kestra.core.models.flows.input.SecretInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
name string
required boolean

Default value is : true

Default: true
validator string
io.kestra.core.models.flows.input.SecretInput-2
io.kestra.core.models.flows.input.SelectInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
allowCustomValue boolean

Default value is : false

Default: false
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
expression string
name string
required boolean

Default value is : true

Default: true
values string[]
io.kestra.core.models.flows.input.SelectInput-2
io.kestra.core.models.flows.input.StringInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
name string
required boolean

Default value is : true

Default: true
validator string
io.kestra.core.models.flows.input.StringInput-2
io.kestra.core.models.flows.input.TimeInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
after string
format=time
before string
format=time
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
name string
required boolean

Default value is : true

Default: true
io.kestra.core.models.flows.input.TimeInput-2
io.kestra.core.models.flows.input.URIInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
name string
required boolean

Default value is : true

Default: true
io.kestra.core.models.flows.input.URIInput-2
io.kestra.core.models.flows.input.YamlInput-1 object
id string required
minLength=1, pattern=^[a-zA-Z0-9][.a-zA-Z0-9_-]*
type string required
Values: "STRING" "ENUM" "SELECT" "INT" "FLOAT" "BOOLEAN" "DATETIME" "DATE" "TIME" "DURATION" "FILE" "JSON" "URI" "SECRET" "ARRAY" "MULTISELECT" "YAML"
defaults
dependsOn
All of: io.kestra.core.models.flows.DependsOn object, The dependencies of the input.
description string
displayName string
maxLength=64
name string
required boolean

Default value is : true

Default: true
io.kestra.core.models.flows.input.YamlInput-2
io.kestra.core.models.listeners.Listener object
tasks io.kestra.plugin.core.debug.Echo | io.kestra.plugin.core.debug.Return | io.kestra.plugin.core.execution.Count | io.kestra.plugin.core.execution.Fail | io.kestra.plugin.core.execution.Labels | io.kestra.plugin.core.execution.PurgeExecutions | io.kestra.plugin.core.execution.Resume | io.kestra.plugin.core.flow.AllowFailure | io.kestra.plugin.core.flow.Dag | io.kestra.plugin.core.flow.EachParallel | io.kestra.plugin.core.flow.EachSequential | io.kestra.plugin.core.flow.ForEach | io.kestra.plugin.core.flow.ForEachItem | io.kestra.plugin.core.flow.If | io.kestra.plugin.core.flow.Parallel | io.kestra.plugin.core.flow.Pause | io.kestra.plugin.core.flow.Sequential | io.kestra.plugin.core.flow.Subflow | io.kestra.plugin.core.flow.Switch | io.kestra.plugin.core.flow.Template | io.kestra.plugin.core.flow.WaitFor | io.kestra.plugin.core.flow.WorkingDirectory | io.kestra.plugin.core.http.Download | io.kestra.plugin.core.http.Request | io.kestra.plugin.core.kv.Delete | io.kestra.plugin.core.kv.Get | io.kestra.plugin.core.kv.GetKeys | io.kestra.plugin.core.kv.Set | io.kestra.plugin.core.log.Fetch | io.kestra.plugin.core.log.Log | io.kestra.plugin.core.log.PurgeLogs | io.kestra.plugin.core.namespace.DeleteFiles | io.kestra.plugin.core.namespace.DownloadFiles | io.kestra.plugin.core.namespace.UploadFiles | io.kestra.plugin.core.output.OutputValues | io.kestra.plugin.core.state.Delete | io.kestra.plugin.core.state.Get | io.kestra.plugin.core.state.Set | io.kestra.plugin.core.storage.Concat | io.kestra.plugin.core.storage.DeduplicateItems | io.kestra.plugin.core.storage.Delete | io.kestra.plugin.core.storage.FilterItems | io.kestra.plugin.core.storage.LocalFiles | io.kestra.plugin.core.storage.PurgeCurrentExecutionFiles | io.kestra.plugin.core.storage.Reverse | io.kestra.plugin.core.storage.Size | io.kestra.plugin.core.storage.Split | io.kestra.plugin.core.templating.TemplatedTask | io.kestra.plugin.core.trigger.Toggle | io.kestra.core.tasks.scripts.Bash | 
io.kestra.plugin.scripts.shell.Commands | io.kestra.plugin.scripts.shell.Script | io.kestra.plugin.solace.Consume | io.kestra.plugin.solace.Produce | io.kestra.plugin.debezium.db2.Capture | io.kestra.plugin.scripts.jython.Eval | io.kestra.plugin.scripts.jython.FileTransform | io.kestra.plugin.git.Clone | io.kestra.plugin.git.Push | io.kestra.plugin.git.PushFlows | io.kestra.plugin.git.PushNamespaceFiles | io.kestra.plugin.git.Sync | io.kestra.plugin.git.SyncFlows | io.kestra.plugin.git.SyncNamespaceFiles | io.kestra.plugin.jdbc.sqlite.Query | io.kestra.plugin.debezium.mysql.Capture | io.kestra.plugin.jdbc.as400.Query | io.kestra.plugin.surrealdb.Query | io.kestra.plugin.ansible.cli.AnsibleCLI | io.kestra.plugin.jdbc.arrowflight.Query | io.kestra.plugin.serdes.avro.AvroToIon | io.kestra.plugin.serdes.avro.IonToAvro | io.kestra.plugin.serdes.csv.CsvToIon | io.kestra.plugin.serdes.csv.IonToCsv | io.kestra.plugin.serdes.excel.ExcelToIon | io.kestra.plugin.serdes.excel.IonToExcel | io.kestra.plugin.serdes.json.IonToJson | io.kestra.plugin.serdes.json.JsonToIon | io.kestra.plugin.serdes.parquet.IonToParquet | io.kestra.plugin.serdes.parquet.ParquetToIon | io.kestra.plugin.serdes.xml.IonToXml | io.kestra.plugin.serdes.xml.XmlToIon | io.kestra.plugin.debezium.mongodb.Capture | io.kestra.plugin.jdbc.vertica.Batch | io.kestra.plugin.jdbc.vertica.Query | io.kestra.plugin.jdbc.pinot.Query | io.kestra.plugin.neo4j.Batch | io.kestra.plugin.neo4j.Query | io.kestra.plugin.nats.Consume | io.kestra.plugin.nats.Produce | io.kestra.plugin.nats.kv.CreateBucket | io.kestra.plugin.nats.kv.Delete | io.kestra.plugin.nats.kv.Get | io.kestra.plugin.nats.kv.Put | io.kestra.plugin.pulsar.Consume | io.kestra.plugin.pulsar.Produce | io.kestra.plugin.pulsar.Reader | io.kestra.plugin.scripts.ruby.Commands | io.kestra.plugin.scripts.ruby.Script | io.kestra.plugin.notifications.discord.DiscordExecution | io.kestra.plugin.notifications.discord.DiscordIncomingWebhook | 
io.kestra.plugin.notifications.google.GoogleChatExecution | io.kestra.plugin.notifications.google.GoogleChatIncomingWebhook | io.kestra.plugin.notifications.mail.MailExecution | io.kestra.plugin.notifications.mail.MailSend | io.kestra.plugin.notifications.opsgenie.OpsgenieAlert | io.kestra.plugin.notifications.opsgenie.OpsgenieExecution | io.kestra.plugin.notifications.pagerduty.PagerDutyAlert | io.kestra.plugin.notifications.pagerduty.PagerDutyExecution | io.kestra.plugin.notifications.sendgrid.SendGridMailExecution | io.kestra.plugin.notifications.sendgrid.SendGridMailSend | io.kestra.plugin.notifications.sentry.SentryAlert | io.kestra.plugin.notifications.sentry.SentryExecution | io.kestra.plugin.notifications.slack.SlackExecution | io.kestra.plugin.notifications.slack.SlackIncomingWebhook | io.kestra.plugin.notifications.teams.TeamsExecution | io.kestra.plugin.notifications.teams.TeamsIncomingWebhook | io.kestra.plugin.notifications.telegram.TelegramExecution | io.kestra.plugin.notifications.telegram.TelegramSend | io.kestra.plugin.notifications.twilio.TwilioAlert | io.kestra.plugin.notifications.twilio.TwilioExecution | io.kestra.plugin.notifications.whatsapp.WhatsAppExecution | io.kestra.plugin.notifications.whatsapp.WhatsAppIncomingWebhook | io.kestra.plugin.notifications.zenduty.ZendutyAlert | io.kestra.plugin.notifications.zenduty.ZendutyExecution | io.kestra.plugin.notifications.zulip.ZulipExecution | io.kestra.plugin.notifications.zulip.ZulipIncomingWebhook | io.kestra.plugin.transform.grok.TransformItems | io.kestra.plugin.transform.grok.TransformValue | io.kestra.plugin.debezium.oracle.Capture | io.kestra.plugin.tika.Parse | io.kestra.plugin.jdbc.db2.Query | io.kestra.plugin.debezium.sqlserver.Capture | io.kestra.plugin.transform.jsonata.TransformItems | io.kestra.plugin.transform.jsonata.TransformValue | io.kestra.plugin.meilisearch.DocumentAdd | io.kestra.plugin.meilisearch.DocumentGet | io.kestra.plugin.meilisearch.FacetSearch | 
io.kestra.plugin.meilisearch.Search | io.kestra.plugin.jira.issues.Create | io.kestra.plugin.jira.issues.CreateComment | io.kestra.plugin.jira.issues.UpdateFields | io.kestra.plugin.singer.taps.BigQuery | io.kestra.plugin.singer.taps.BingAds | io.kestra.plugin.singer.taps.ChargeBee | io.kestra.plugin.singer.taps.ExchangeRateHost | io.kestra.plugin.singer.taps.FacebookAds | io.kestra.plugin.singer.taps.Fastly | io.kestra.plugin.singer.taps.GenericTap | io.kestra.plugin.singer.taps.GitHub | io.kestra.plugin.singer.taps.Gitlab | io.kestra.plugin.singer.taps.GoogleAdwords | io.kestra.plugin.singer.taps.GoogleAnalytics | io.kestra.plugin.singer.taps.GoogleSearchConsole | io.kestra.plugin.singer.taps.HubSpot | io.kestra.plugin.singer.taps.Marketo | io.kestra.plugin.singer.taps.Netsuite | io.kestra.plugin.singer.taps.PipelinewiseMongoDb | io.kestra.plugin.singer.taps.PipelinewiseMysql | io.kestra.plugin.singer.taps.PipelinewiseOracle | io.kestra.plugin.singer.taps.PipelinewisePostgres | io.kestra.plugin.singer.taps.PipelinewiseSqlServer | io.kestra.plugin.singer.taps.Quickbooks | io.kestra.plugin.singer.taps.Recharge | io.kestra.plugin.singer.taps.SageIntacct | io.kestra.plugin.singer.taps.Salesforce | io.kestra.plugin.singer.taps.Shopify | io.kestra.plugin.singer.taps.Slack | io.kestra.plugin.singer.taps.Stripe | io.kestra.plugin.singer.taps.Zendesk | io.kestra.plugin.singer.taps.Zoom | io.kestra.plugin.singer.targets.AdswerveBigQuery | io.kestra.plugin.singer.targets.Csv | io.kestra.plugin.singer.targets.DatamillCoPostgres | io.kestra.plugin.singer.targets.GenericTarget | io.kestra.plugin.singer.targets.Json | io.kestra.plugin.singer.targets.MeltanoSnowflake | io.kestra.plugin.singer.targets.Oracle | io.kestra.plugin.singer.targets.PipelinewisePostgres | io.kestra.plugin.singer.targets.PipelinewiseRedshift | io.kestra.plugin.singer.targets.PipelinewiseSnowflake | io.kestra.plugin.singer.targets.SqlServer | io.kestra.plugin.gcp.auth.OauthAccessToken | 
io.kestra.plugin.gcp.bigquery.Copy | io.kestra.plugin.gcp.bigquery.CopyPartitions | io.kestra.plugin.gcp.bigquery.CreateDataset | io.kestra.plugin.gcp.bigquery.CreateTable | io.kestra.plugin.gcp.bigquery.DeleteDataset | io.kestra.plugin.gcp.bigquery.DeletePartitions | io.kestra.plugin.gcp.bigquery.DeleteTable | io.kestra.plugin.gcp.bigquery.ExtractToGcs | io.kestra.plugin.gcp.bigquery.Load | io.kestra.plugin.gcp.bigquery.LoadFromGcs | io.kestra.plugin.gcp.bigquery.Query | io.kestra.plugin.gcp.bigquery.StorageWrite | io.kestra.plugin.gcp.bigquery.TableMetadata | io.kestra.plugin.gcp.bigquery.UpdateDataset | io.kestra.plugin.gcp.bigquery.UpdateTable | io.kestra.plugin.gcp.cli.GCloudCLI | io.kestra.plugin.gcp.dataproc.batches.PySparkSubmit | io.kestra.plugin.gcp.dataproc.batches.RSparkSubmit | io.kestra.plugin.gcp.dataproc.batches.SparkSqlSubmit | io.kestra.plugin.gcp.dataproc.batches.SparkSubmit | io.kestra.plugin.gcp.dataproc.clusters.Create | io.kestra.plugin.gcp.dataproc.clusters.Delete | io.kestra.plugin.gcp.firestore.Delete | io.kestra.plugin.gcp.firestore.Get | io.kestra.plugin.gcp.firestore.Query | io.kestra.plugin.gcp.firestore.Set | io.kestra.plugin.gcp.gcs.Compose | io.kestra.plugin.gcp.gcs.Copy | io.kestra.plugin.gcp.gcs.CreateBucket | io.kestra.plugin.gcp.gcs.CreateBucketIamPolicy | io.kestra.plugin.gcp.gcs.Delete | io.kestra.plugin.gcp.gcs.DeleteBucket | io.kestra.plugin.gcp.gcs.DeleteList | io.kestra.plugin.gcp.gcs.Download | io.kestra.plugin.gcp.gcs.Downloads | io.kestra.plugin.gcp.gcs.List | io.kestra.plugin.gcp.gcs.UpdateBucket | io.kestra.plugin.gcp.gcs.Upload | io.kestra.plugin.gcp.gke.ClusterMetadata | io.kestra.plugin.gcp.pubsub.Consume | io.kestra.plugin.gcp.pubsub.Publish | io.kestra.plugin.gcp.vertexai.ChatCompletion | io.kestra.plugin.gcp.vertexai.CustomJob | io.kestra.plugin.gcp.vertexai.MultimodalCompletion | io.kestra.plugin.gcp.vertexai.TextCompletion | io.kestra.plugin.hubspot.tickets.Create | io.kestra.plugin.terraform.cli.TerraformCLI 
| io.kestra.plugin.jdbc.duckdb.Query | io.kestra.plugin.powerbi.RefreshGroupDataset | io.kestra.plugin.minio.Copy | io.kestra.plugin.minio.CreateBucket | io.kestra.plugin.minio.Delete | io.kestra.plugin.minio.DeleteList | io.kestra.plugin.minio.Download | io.kestra.plugin.minio.Downloads | io.kestra.plugin.minio.List | io.kestra.plugin.minio.Upload | io.kestra.plugin.spark.JarSubmit | io.kestra.plugin.spark.PythonSubmit | io.kestra.plugin.spark.RSubmit | io.kestra.plugin.spark.SparkCLI | io.kestra.plugin.scripts.powershell.Commands | io.kestra.plugin.scripts.powershell.Script | io.kestra.plugin.jdbc.oracle.Batch | io.kestra.plugin.jdbc.oracle.Query | io.kestra.plugin.elasticsearch.Bulk | io.kestra.plugin.elasticsearch.Get | io.kestra.plugin.elasticsearch.Load | io.kestra.plugin.elasticsearch.Put | io.kestra.plugin.elasticsearch.Request | io.kestra.plugin.elasticsearch.Scroll | io.kestra.plugin.elasticsearch.Search | io.kestra.plugin.cloudquery.CloudQueryCLI | io.kestra.plugin.cloudquery.Sync | io.kestra.plugin.openai.ChatCompletion | io.kestra.plugin.openai.CreateImage | io.kestra.plugin.amqp.Consume | io.kestra.plugin.amqp.CreateQueue | io.kestra.plugin.amqp.DeclareExchange | io.kestra.plugin.amqp.Publish | io.kestra.plugin.amqp.QueueBind | io.kestra.plugin.scripts.r.Commands | io.kestra.plugin.scripts.r.Script | io.kestra.plugin.hightouch.Sync | io.kestra.plugin.ldap.Add | io.kestra.plugin.ldap.Delete | io.kestra.plugin.ldap.IonToLdif | io.kestra.plugin.ldap.LdifToIon | io.kestra.plugin.ldap.Modify | io.kestra.plugin.ldap.Search | io.kestra.plugin.cassandra.astradb.Query | io.kestra.plugin.cassandra.standard.Query | io.kestra.plugin.malloy.CLI | io.kestra.plugin.fivetran.connectors.Sync | io.kestra.plugin.mongodb.Bulk | io.kestra.plugin.mongodb.Delete | io.kestra.plugin.mongodb.Find | io.kestra.plugin.mongodb.InsertOne | io.kestra.plugin.mongodb.Load | io.kestra.plugin.mongodb.Update | io.kestra.plugin.redis.list.ListPop | io.kestra.plugin.redis.list.ListPush | 
io.kestra.plugin.redis.pubsub.Publish | io.kestra.plugin.redis.string.Delete | io.kestra.plugin.redis.string.Get | io.kestra.plugin.redis.string.Set | io.kestra.plugin.zendesk.tickets.Create | io.kestra.plugin.databricks.cluster.CreateCluster | io.kestra.plugin.databricks.cluster.DeleteCluster | io.kestra.plugin.databricks.dbfs.Download | io.kestra.plugin.databricks.dbfs.Upload | io.kestra.plugin.databricks.job.CreateJob | io.kestra.plugin.databricks.job.SubmitRun | io.kestra.plugin.databricks.sql.Query | io.kestra.plugin.linear.issues.Create | io.kestra.plugin.airbyte.cloud.jobs.Reset | io.kestra.plugin.airbyte.cloud.jobs.Sync | io.kestra.plugin.airbyte.connections.CheckStatus | io.kestra.plugin.airbyte.connections.Sync | io.kestra.plugin.jdbc.trino.Query | io.kestra.plugin.jdbc.sybase.Query | io.kestra.plugin.airflow.dags.TriggerDagRun | io.kestra.plugin.scripts.jbang.Commands | io.kestra.plugin.scripts.jbang.Script | io.kestra.plugin.scripts.groovy.Eval | io.kestra.plugin.scripts.groovy.FileTransform | io.kestra.plugin.dataform.cli.DataformCLI | io.kestra.plugin.jdbc.sqlserver.Batch | io.kestra.plugin.jdbc.sqlserver.Query | io.kestra.plugin.jdbc.clickhouse.BulkInsert | io.kestra.plugin.jdbc.clickhouse.ClickHouseLocalCLI | io.kestra.plugin.jdbc.clickhouse.Query | io.kestra.plugin.jdbc.druid.Query | io.kestra.plugin.github.code.Search | io.kestra.plugin.github.commits.Search | io.kestra.plugin.github.issues.Comment | io.kestra.plugin.github.issues.Create | io.kestra.plugin.github.issues.Search | io.kestra.plugin.github.pulls.Create | io.kestra.plugin.github.pulls.Search | io.kestra.plugin.github.repositories.Search | io.kestra.plugin.github.topics.Search | io.kestra.plugin.github.users.Search | io.kestra.plugin.soda.Scan | io.kestra.plugin.docker.Build | io.kestra.plugin.docker.Run | io.kestra.plugin.servicenow.Post | io.kestra.plugin.fs.ftp.Delete | io.kestra.plugin.fs.ftp.Download | io.kestra.plugin.fs.ftp.Downloads | io.kestra.plugin.fs.ftp.List | 
io.kestra.plugin.fs.ftp.Move | io.kestra.plugin.fs.ftp.Upload | io.kestra.plugin.fs.ftp.Uploads | io.kestra.plugin.fs.ftps.Delete | io.kestra.plugin.fs.ftps.Download | io.kestra.plugin.fs.ftps.Downloads | io.kestra.plugin.fs.ftps.List | io.kestra.plugin.fs.ftps.Move | io.kestra.plugin.fs.ftps.Upload | io.kestra.plugin.fs.ftps.Uploads | io.kestra.plugin.fs.sftp.Delete | io.kestra.plugin.fs.sftp.Download | io.kestra.plugin.fs.sftp.Downloads | io.kestra.plugin.fs.sftp.List | io.kestra.plugin.fs.sftp.Move | io.kestra.plugin.fs.sftp.Upload | io.kestra.plugin.fs.sftp.Uploads | io.kestra.plugin.fs.smb.Delete | io.kestra.plugin.fs.smb.Download | io.kestra.plugin.fs.smb.Downloads | io.kestra.plugin.fs.smb.List | io.kestra.plugin.fs.smb.Move | io.kestra.plugin.fs.smb.Upload | io.kestra.plugin.fs.smb.Uploads | io.kestra.plugin.fs.ssh.Command | io.kestra.plugin.crypto.openpgp.Decrypt | io.kestra.plugin.crypto.openpgp.Encrypt | io.kestra.plugin.weaviate.BatchCreate | io.kestra.plugin.weaviate.Delete | io.kestra.plugin.weaviate.Query | io.kestra.plugin.weaviate.SchemaCreate | io.kestra.core.tasks.scripts.Node | io.kestra.plugin.scripts.node.Commands | io.kestra.plugin.scripts.node.Script | io.kestra.plugin.jdbc.mysql.Batch | io.kestra.plugin.jdbc.mysql.Query | io.kestra.plugin.modal.cli.ModalCLI | io.kestra.plugin.jdbc.vectorwise.Batch | io.kestra.plugin.jdbc.vectorwise.Query | io.kestra.plugin.jdbc.redshift.Query | io.kestra.plugin.kubernetes.PodCreate | io.kestra.plugin.kubernetes.kubectl.Apply | io.kestra.plugin.jdbc.postgresql.Batch | io.kestra.plugin.jdbc.postgresql.CopyIn | io.kestra.plugin.jdbc.postgresql.CopyOut | io.kestra.plugin.jdbc.postgresql.Query | io.kestra.plugin.mqtt.Publish | io.kestra.plugin.mqtt.Subscribe | io.kestra.plugin.sqlmesh.cli.SQLMeshCLI | io.kestra.plugin.couchbase.Query | io.kestra.plugin.scripts.julia.Commands | io.kestra.plugin.scripts.julia.Script | io.kestra.plugin.jdbc.dremio.Query | io.kestra.plugin.googleworkspace.drive.Create | 
io.kestra.plugin.googleworkspace.drive.Delete | io.kestra.plugin.googleworkspace.drive.Download | io.kestra.plugin.googleworkspace.drive.Export | io.kestra.plugin.googleworkspace.drive.List | io.kestra.plugin.googleworkspace.drive.Upload | io.kestra.plugin.googleworkspace.sheets.CreateSpreadsheet | io.kestra.plugin.googleworkspace.sheets.DeleteSpreadsheet | io.kestra.plugin.googleworkspace.sheets.Load | io.kestra.plugin.googleworkspace.sheets.Read | io.kestra.plugin.googleworkspace.sheets.ReadRange | io.kestra.plugin.dbt.cli.Build | io.kestra.plugin.dbt.cli.Compile | io.kestra.plugin.dbt.cli.DbtCLI | io.kestra.plugin.dbt.cli.Deps | io.kestra.plugin.dbt.cli.Freshness | io.kestra.plugin.dbt.cli.List | io.kestra.plugin.dbt.cli.Run | io.kestra.plugin.dbt.cli.Seed | io.kestra.plugin.dbt.cli.Setup | io.kestra.plugin.dbt.cli.Snapshot | io.kestra.plugin.dbt.cli.Test | io.kestra.plugin.dbt.cloud.CheckStatus | io.kestra.plugin.dbt.cloud.TriggerRun | io.kestra.plugin.scripts.nashorn.Eval | io.kestra.plugin.scripts.nashorn.FileTransform | io.kestra.plugin.aws.athena.Query | io.kestra.plugin.aws.cli.AwsCLI | io.kestra.plugin.aws.dynamodb.DeleteItem | io.kestra.plugin.aws.dynamodb.GetItem | io.kestra.plugin.aws.dynamodb.PutItem | io.kestra.plugin.aws.dynamodb.Query | io.kestra.plugin.aws.dynamodb.Scan | io.kestra.plugin.aws.ecr.GetAuthToken | io.kestra.plugin.aws.eventbridge.PutEvents | io.kestra.plugin.aws.kinesis.PutRecords | io.kestra.plugin.aws.lambda.Invoke | io.kestra.plugin.aws.s3.Copy | io.kestra.plugin.aws.s3.CreateBucket | io.kestra.plugin.aws.s3.Delete | io.kestra.plugin.aws.s3.DeleteList | io.kestra.plugin.aws.s3.Download | io.kestra.plugin.aws.s3.Downloads | io.kestra.plugin.aws.s3.List | io.kestra.plugin.aws.s3.Upload | io.kestra.plugin.aws.sns.Publish | io.kestra.plugin.aws.sqs.Consume | io.kestra.plugin.aws.sqs.Publish | io.kestra.plugin.compress.ArchiveCompress | io.kestra.plugin.compress.ArchiveDecompress | io.kestra.plugin.compress.FileCompress | 
io.kestra.plugin.compress.FileDecompress | io.kestra.core.tasks.scripts.Python | io.kestra.plugin.scripts.python.Commands | io.kestra.plugin.scripts.python.Script | io.kestra.plugin.debezium.postgres.Capture | io.kestra.plugin.azure.batch.job.Create | io.kestra.plugin.azure.batch.pool.Resize | io.kestra.plugin.azure.cli.AzCLI | io.kestra.plugin.azure.datafactory.CreateRun | io.kestra.plugin.azure.eventhubs.Consume | io.kestra.plugin.azure.eventhubs.Produce | io.kestra.plugin.azure.storage.blob.Copy | io.kestra.plugin.azure.storage.blob.Delete | io.kestra.plugin.azure.storage.blob.DeleteList | io.kestra.plugin.azure.storage.blob.Download | io.kestra.plugin.azure.storage.blob.Downloads | io.kestra.plugin.azure.storage.blob.List | io.kestra.plugin.azure.storage.blob.SharedAccess | io.kestra.plugin.azure.storage.blob.Upload | io.kestra.plugin.azure.storage.table.Bulk | io.kestra.plugin.azure.storage.table.Delete | io.kestra.plugin.azure.storage.table.Get | io.kestra.plugin.azure.storage.table.List | io.kestra.plugin.kafka.Consume | io.kestra.plugin.kafka.Produce | io.kestra.plugin.jdbc.snowflake.Download | io.kestra.plugin.jdbc.snowflake.Query | io.kestra.plugin.jdbc.snowflake.Upload[] required
minItems=1
conditions io.kestra.plugin.core.condition.DateTimeBetweenCondition | io.kestra.plugin.core.condition.DayWeekCondition | io.kestra.plugin.core.condition.DayWeekInMonthCondition | io.kestra.plugin.core.condition.ExecutionFlowCondition | io.kestra.plugin.core.condition.ExecutionLabelsCondition | io.kestra.plugin.core.condition.ExecutionNamespaceCondition | io.kestra.plugin.core.condition.ExecutionOutputsCondition | io.kestra.plugin.core.condition.ExecutionStatusCondition | io.kestra.plugin.core.condition.ExpressionCondition | io.kestra.plugin.core.condition.FlowCondition | io.kestra.plugin.core.condition.FlowNamespaceCondition | io.kestra.plugin.core.condition.HasRetryAttemptCondition | io.kestra.plugin.core.condition.MultipleCondition | io.kestra.plugin.core.condition.NotCondition | io.kestra.plugin.core.condition.OrCondition | io.kestra.plugin.core.condition.PublicHolidayCondition | io.kestra.plugin.core.condition.TimeBetweenCondition | io.kestra.plugin.core.condition.WeekendCondition[]
description string
io.kestra.core.models.property.Data_java.util.Map_ object
fromList object[]
fromMap object
fromURI string | string
io.kestra.core.models.tasks.NamespaceFiles object
enabled boolean

Default value is : true

Default: true
exclude string[]
include string[]
io.kestra.core.models.tasks.WorkerGroup object
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.core.models.tasks.retrys.Constant-1 object
interval string required
format=duration
behavior string

Default value is : RETRY_FAILED_TASK

Default: "RETRY_FAILED_TASK"
Values: "RETRY_FAILED_TASK" "CREATE_NEW_EXECUTION"
maxAttempt integer
min=1
maxDuration string
format=duration
type string

Default value is : constant

Default: "constant"
warningOnRetry boolean

Default value is : false

Default: false
io.kestra.core.models.tasks.retrys.Constant-2
io.kestra.core.models.tasks.retrys.Exponential-1 object
interval string required
format=duration
maxInterval string required
format=duration
behavior string

Default value is : RETRY_FAILED_TASK

Default: "RETRY_FAILED_TASK"
Values: "RETRY_FAILED_TASK" "CREATE_NEW_EXECUTION"
delayFactor number
maxAttempt integer
min=1
maxDuration string
format=duration
type string

Default value is : exponential

Default: "exponential"
warningOnRetry boolean

Default value is : false

Default: false
io.kestra.core.models.tasks.retrys.Exponential-2
io.kestra.core.models.tasks.retrys.Random-1 object
maxInterval string required
format=duration
minInterval string required
format=duration
behavior string

Default value is : RETRY_FAILED_TASK

Default: "RETRY_FAILED_TASK"
Values: "RETRY_FAILED_TASK" "CREATE_NEW_EXECUTION"
maxAttempt integer
min=1
maxDuration string
format=duration
type string

Default value is : random

Default: "random"
warningOnRetry boolean

Default value is : false

Default: false
io.kestra.core.models.tasks.retrys.Random-2
io.kestra.core.services.FlowService object
io.kestra.core.tasks.scripts.Bash object

This task is deprecated, please use the io.kestra.plugin.scripts.shell.Script or io.kestra.plugin.scripts.shell.Commands task instead.

##### Examples

Single bash command.

id: bash_single_command
namespace: company.team

tasks:
  - id: bash
    type: io.kestra.core.tasks.scripts.Bash
    commands:
      - 'echo "The current execution is : {{ execution.id }}"'

Bash command that generate file in storage accessible through outputs.

id: bash_generate_files
namespace: company.team

tasks:
  - id: bash
    type: io.kestra.core.tasks.scripts.Bash
    outputFiles:
      - first
      - second
    commands:
      - echo "1" >> {{ outputFiles.first }}
      - echo "2" >> {{ outputFiles.second }}

Bash with some inputs files.

id: bash_input_files
namespace: company.team

tasks:
  - id: bash
    type: io.kestra.core.tasks.scripts.Bash
    inputFiles:
      script.sh: |
        echo {{ workingDir }}
    commands:
      - /bin/bash script.sh

Bash with an input file from Kestra's local storage created by a previous task.

id: bash_use_input_files
namespace: company.team

tasks:
  - id: bash
    type: io.kestra.core.tasks.scripts.Bash
    inputFiles:
      data.csv: {{ outputs.previousTaskId.uri }}
    commands:
      - cat data.csv

Run a command on a Docker image.

id: bash_run_php_code
namespace: company.team

tasks:
  - id: bash
    type: io.kestra.core.tasks.scripts.Bash
    runner: DOCKER
    dockerOptions:
      image: php
    commands:
      - php -r 'print(phpversion() . "\n");'

Execute cmd on Windows.

id: bash_run_cmd_on_windows
namespace: company.team

tasks:
  - id: bash
    type: io.kestra.core.tasks.scripts.Bash
    commands:
      - 'echo "The current execution is : {{ execution.id }}"'
    exitOnFailed: false
    interpreter: cmd
    interpreterArgs:
      - /c

Set outputs from bash standard output.

id: bash_set_outputs
namespace: company.team

tasks:
  - id: bash
    type: io.kestra.core.tasks.scripts.Bash
    commands:
      - echo '::{"outputs":{"test":"value","int":2,"bool":true,"float":3.65}}::'

Send a counter metric from bash standard output.

id: bash_set_metrics
namespace: company.team

tasks:
  - id: bash
    type: io.kestra.core.tasks.scripts.Bash
    commands:
      - echo '::{"metrics":[{"name":"count","type":"counter","value":1,"tags":{"tag1":"i","tag2":"win"}}]}::'

commands string[] required

Default command will be launched with /bin/sh -c "commands".

minItems=1
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.core.tasks.scripts.Bash" required
Constant: "io.kestra.core.tasks.scripts.Bash"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
dockerOptions
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Docker options when using the `DOCKER` runner.
env Record<string, string>
exitOnFailed boolean

This tells bash that it should exit the script if any statement returns a non-true return value. Setting this to true helps catch cases where a command fails and the script continues to run anyway.

Default value is : true

Default: true
files string[]

Use outputFiles instead.

inputFiles Record<string, string>

Define the files as a map of a file name being the key, and the value being the file's content. Alternatively, configure the files as a JSON string with the same key/value structure as the map. In both cases, you can either specify the file's content inline, or reference a file from Kestra's internal storage by its URI, e.g. a file from an input, output of a previous task, or a Namespace File.

interpreter string

Default value is : /bin/sh

Default: "/bin/sh"
minLength=1
interpreterArgs string[]

Default value is : - -c

Default:
[
  "-c"
]
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
outputDirs string[]

List of keys that will generate temporary directories. This property can be used with a special variable named outputDirs.key. If you add a file with ["myDir"], you can use the special var echo 1 >> {{ outputDirs.myDir }}/file1.txt and echo 2 >> {{ outputDirs.myDir }}/file2.txt, and both the files will be uploaded to Kestra's internal storage. You can reference them in other tasks using {{ outputs.taskId.outputFiles['myDir/file1.txt'] }}.

outputFiles string[]

List of keys that will generate temporary files. This property can be used with a special variable named outputFiles.key. If you add a file with ["first"], you can use the special var echo 1 >> {{ outputFiles.first }}, and on other tasks, you can reference it using {{ outputs.taskId.outputFiles.first }}.

outputsFiles string[]

Use outputFiles instead.

runner string

Default value is : PROCESS

Default: "PROCESS"
Values: "PROCESS" "DOCKER"
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.core.tasks.scripts.Node object

This task is deprecated, please use the io.kestra.plugin.scripts.node.Script or io.kestra.plugin.scripts.node.Commands task instead.

With the Node task, you can execute a full JavaScript script. The task will create a temporary folder for each task, and allows you to install some npm packages defined in an optional package.json file.

By convention, you need to define at least a main.js file in inputFiles that will be the script used. You can also add as many JavaScript files as you need in inputFiles.

The outputs & metrics from your Node.js script can be used by other tasks. In order to make things easy, we inject a node package directly into the working directory. Here is an example usage:

const Kestra = require("./kestra");
Kestra.outputs({test: 'value', int: 2, bool: true, float: 3.65});
Kestra.counter('count', 1, {tag1: 'i', tag2: 'win'});
Kestra.timer('timer1', (callback) => { setTimeout(callback, 1000) }, {tag1: 'i', tag2: 'lost'});
Kestra.timer('timer2', 2.12, {tag1: 'i', tag2: 'destroy'});
##### Examples

Execute a Node.js script.
inputFiles:
  main.js: |
    const Kestra = require("./kestra");
    const fs = require('fs')
    const result = fs.readFileSync(process.argv[2], "utf-8")
    console.log(JSON.parse(result).status)
    const axios = require('axios')
    axios.get('http://google.fr').then(d => { console.log(d.status); Kestra.outputs({'status': d.status, 'text': d.data})})
    console.log(require('./mymodule').value)
  data.json: |
    {"status": "OK"}
  mymodule.js: |
    module.exports.value = 'hello world'
  package.json: |
    {
      "name": "tmp",
      "version": "1.0.0",
      "description": "",
      "main": "index.js",
      "dependencies": {
          "axios": "^0.20.0"
      },
      "devDependencies": {},
      "scripts": {
          "test": "echo `Error: no test specified` && exit 1"
      },
      "author": "",
      "license": "ISC"
    }
args:
  - data.json
warningOnStdErr: false

Execute a Node.js script with an input file from Kestra's internal storage created by a previous task.

inputFiles:
  data.csv: {{ outputs.previousTaskId.uri }}
  main.js: |
    const fs = require('fs')
    const result = fs.readFileSync('data.csv', 'utf-8')
    console.log(result)
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.core.tasks.scripts.Node" required
Constant: "io.kestra.core.tasks.scripts.Node"
allowFailure boolean

Default value is : false

Default: false
args string[]

Arguments list to pass to main JavaScript script.

description string
disabled boolean

Default value is : false

Default: false
dockerOptions
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Docker options when using the `DOCKER` runner.
env Record<string, string>
exitOnFailed boolean

This tells bash that it should exit the script if any statement returns a non-true return value. Setting this to true helps catch cases where a command fails and the script continues to run anyway.

Default value is : true

Default: true
files string[]

Use outputFiles instead.

inputFiles Record<string, string>

Define the files as a map of a file name being the key, and the value being the file's content. Alternatively, configure the files as a JSON string with the same key/value structure as the map. In both cases, you can either specify the file's content inline, or reference a file from Kestra's internal storage by its URI, e.g. a file from an input, output of a previous task, or a Namespace File.

interpreter string

Default value is : /bin/sh

Default: "/bin/sh"
minLength=1
interpreterArgs string[]

Default value is : - -c

Default:
[
  "-c"
]
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
nodePath string

Set the node interpreter path to use.

Default value is : node

Default: "node"
npmPath string

Set the npm binary path for node dependencies setup.

Default value is : npm

Default: "npm"
outputDirs string[]

List of keys that will generate temporary directories. This property can be used with a special variable named outputDirs.key. If you add a file with ["myDir"], you can use the special var echo 1 >> {{ outputDirs.myDir }}/file1.txt and echo 2 >> {{ outputDirs.myDir }}/file2.txt, and both the files will be uploaded to Kestra's internal storage. You can reference them in other tasks using {{ outputs.taskId.outputFiles['myDir/file1.txt'] }}.

outputFiles string[]

List of keys that will generate temporary files. This property can be used with a special variable named outputFiles.key. If you add a file with ["first"], you can use the special var echo 1 >> {{ outputFiles.first }}, and on other tasks, you can reference it using {{ outputs.taskId.outputFiles.first }}.

outputsFiles string[]

Use outputFiles instead.

runner string

Default value is : PROCESS

Default: "PROCESS"
Values: "PROCESS" "DOCKER"
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.core.tasks.scripts.Python object

This task is deprecated, please use the io.kestra.plugin.scripts.python.Script or io.kestra.plugin.scripts.python.Commands task instead.

With the Python task, you can execute a full Python script. The task will create a fresh virtualenv for every task and allows you to install Python packages defined in the requirements property.

By convention, you need to define at least a main.py file in inputFiles that will be the script used. You can also add as many scripts as you need in inputFiles.

You can also add a pip.conf in inputFiles to customize the pip download of dependencies (like a private registry).

You can send outputs & metrics from your Python script that can be used by other tasks. To help with this, we inject a python package directly into the working directory. Here is an example usage:

from kestra import Kestra
Kestra.outputs({'test': 'value', 'int': 2, 'bool': True, 'float': 3.65})
Kestra.counter('count', 1, {'tag1': 'i', 'tag2': 'win'})
Kestra.timer('timer1', lambda: time.sleep(1), {'tag1': 'i', 'tag2': 'lost'})
Kestra.timer('timer2', 2.12, {'tag1': 'i', 'tag2': 'destroy'})
```

##### Examples
> Execute a python script.
```yaml
id: python_flow
namespace: company.team

tasks:
  - id: python
    type: io.kestra.core.tasks.scripts.Python
    inputFiles:
      data.json: |
        {"status": "OK"}
      main.py: |
        from kestra import Kestra
        import json
        import requests
        import sys
        result = json.loads(open(sys.argv[1]).read())
        print(f"python script {result['status']}")
        response = requests.get('http://google.com')
        print(response.status_code)
        Kestra.outputs({'status': response.status_code, 'text': response.text})
      pip.conf: |
        # some specific pip repository configuration
    args:
      - data.json
    requirements:
      - requests

Execute a python script with an input file from Kestra's local storage created by a previous task.

id: python_flow
namespace: company.team

tasks:
  - id: python
    type: io.kestra.core.tasks.scripts.Python
    inputFiles:
      data.csv: "{{ outputs.previousTaskId.uri }}"
      main.py: |
        with open('data.csv', 'r') as f:
          print(f.read())

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.core.tasks.scripts.Python" required
Constant: "io.kestra.core.tasks.scripts.Python"
allowFailure boolean

Default value is : false

Default: false
args string[]

Arguments list to pass to main python script

commands string[]

Default command will be launched with ./bin/python main.py {{args}}

Default value is : - ./bin/python main.py

Default value is : - ./bin/python main.py

Default:
[
  "./bin/python main.py"
]
minItems=1
description string
disabled boolean

Default value is : false

Default: false
dockerOptions
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Docker options when using the `DOCKER` runner.
env Record<string, string>
exitOnFailed boolean

This tells bash that it should exit the script if any statement returns a non-true return value. Setting this to true helps catch cases where a command fails and the script continues to run anyway.

Default value is : true

Default: true
files string[]

Use outputFiles instead.

inputFiles Record<string, string>

Define the files as a map of a file name being the key, and the value being the file's content. Alternatively, configure the files as a JSON string with the same key/value structure as the map. In both cases, you can either specify the file's content inline, or reference a file from Kestra's internal storage by its URI, e.g. a file from an input, output of a previous task, or a Namespace File.

interpreter string

Default value is : /bin/sh

Default: "/bin/sh"
minLength=1
interpreterArgs string[]

Default value is : - -c

Default value is : - -c

Default:
[
  "-c"
]
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
outputDirs string[]

List of keys that will generate temporary directories. This property can be used with a special variable named outputDirs.key. If you add a file with ["myDir"], you can use the special var echo 1 >> {{ outputDirs.myDir }}/file1.txt and echo 2 >> {{ outputDirs.myDir }}/file2.txt, and both the files will be uploaded to Kestra's internal storage. You can reference them in other tasks using {{ outputs.taskId.outputFiles['myDir/file1.txt'] }}.

outputFiles string[]

List of keys that will generate temporary files. This property can be used with a special variable named outputFiles.key. If you add a file with ["first"], you can use the special var echo 1 >> {{ outputFiles.first }}, and on other tasks, you can reference it using {{ outputs.taskId.outputFiles.first }}.

outputsFiles string[]

Use outputFiles instead.

pythonPath string

Set the python interpreter path to use

Default value is : python

Default: "python"
minLength=1
requirements string[]

Python dependencies list to set up in the virtualenv, in the same format as requirements.txt

runner string

Default value is : PROCESS

Default: "PROCESS"
Values: "PROCESS" "DOCKER"
timeout string
format=duration
virtualEnv boolean

When a virtual env is created, we will install the required dependencies. Disable it if all the requirements are already on the file system. If you disable the virtual env creation, the requirements will be ignored.

Default value is : true

Default: true
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.airbyte.cloud.jobs.Reset object
Examples
id: airbyte_reset
namespace: company.team

tasks:
  - id: reset
    type: io.kestra.plugin.airbyte.cloud.jobs.Reset
    token: <token>
    connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12cd

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.airbyte.cloud.jobs.Reset" required
Constant: "io.kestra.plugin.airbyte.cloud.jobs.Reset"
allowFailure boolean

Default value is : false

Default: false
connectionId string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

Default value is : 3600.000000000

Default: 3600.0
format=duration
password string
pollFrequency string

Default value is : 1.000000000

Default: 1.0
format=duration
timeout string
format=duration
token string
username string
wait boolean

Allowing capture of job status & logs.

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.airbyte.cloud.jobs.Sync object
Examples
id: airbyte_sync
namespace: company.team

tasks:
  - id: sync
    type: io.kestra.plugin.airbyte.cloud.jobs.Sync
    token: <token>
    connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12cd

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.airbyte.cloud.jobs.Sync" required
Constant: "io.kestra.plugin.airbyte.cloud.jobs.Sync"
allowFailure boolean

Default value is : false

Default: false
connectionId string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

Default value is : 3600.000000000

Default: 3600.0
format=duration
password string
pollFrequency string

Default value is : 1.000000000

Default: 1.0
format=duration
timeout string
format=duration
token string
username string
wait boolean

Allowing capture of job status & logs.

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.airbyte.connections.CheckStatus object
Examples
id: airbyte_check_status
namespace: company.team

tasks:
  - id: "check_status"
    type: "io.kestra.plugin.airbyte.connections.CheckStatus"
    url: http://localhost:8080
    jobId: 970

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.airbyte.connections.CheckStatus" required
Constant: "io.kestra.plugin.airbyte.connections.CheckStatus"
url string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
httpTimeout string

Default value is : 10.000000000

Default: 10.0
format=duration
jobId string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

Default value is : 3600.000000000

Default: 3600.0
format=duration
password string
pollFrequency string

Default value is : 1.000000000

Default: 1.0
format=duration
timeout string
format=duration
token string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.airbyte.connections.Sync object
Examples
id: airbyte_sync
namespace: company.team

tasks:
  - id: sync
    type: io.kestra.plugin.airbyte.connections.Sync
    url: http://localhost:8080
    connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12cd

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.airbyte.connections.Sync" required
Constant: "io.kestra.plugin.airbyte.connections.Sync"
url string required
allowFailure boolean

Default value is : false

Default: false
connectionId string
description string
disabled boolean

Default value is : false

Default: false
failOnActiveSync boolean

Default value is : true

Default: true
httpTimeout string

Default value is : 10.000000000

Default: 10.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

Default value is : 3600.000000000

Default: 3600.0
format=duration
password string
pollFrequency string

Default value is : 1.000000000

Default: 1.0
format=duration
timeout string
format=duration
token string
username string
wait boolean

Allowing capture of job status & logs.

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.airflow.dags.TriggerDagRun object

Launch a DAG run, optionally wait for its completion and return the final state of the DAG run.

##### Examples

Trigger a DAG run with custom inputs, and authenticate with basic authentication

id: airflow
namespace: company.team

tasks:
  - id: run_dag
    type: io.kestra.plugin.airflow.dags.TriggerDagRun
    baseUrl: http://host.docker.internal:8080
    dagId: example_astronauts
    wait: true
    pollFrequency: PT1S
    options:
      basicAuthUser: "{{ secret('AIRFLOW_USERNAME') }}"
      basicAuthPassword: "{{ secret('AIRFLOW_PASSWORD') }}"
    body:
      conf:
        source: kestra
        namespace: "{{ flow.namespace }}"
        flow: "{{ flow.id }}"
        task: "{{ task.id }}"
        execution: "{{ execution.id }}"

Trigger a DAG run with custom inputs, and authenticate with a Bearer token

id: airflow_header_authorization
namespace: company.team

tasks:
  - id: run_dag
    type: io.kestra.plugin.airflow.dags.TriggerDagRun
    baseUrl: http://host.docker.internal:8080
    dagId: example_astronauts
    wait: true
    headers:
      authorization: "Bearer {{ secret('AIRFLOW_TOKEN') }}"

baseUrl string required
dagId string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.airflow.dags.TriggerDagRun" required
Constant: "io.kestra.plugin.airflow.dags.TriggerDagRun"
allowFailure boolean

Default value is : false

Default: false
body object
description string
disabled boolean

Default value is : false

Default: false
headers object
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

Default value is : 3600.000000000

Default: 3600.0
format=duration
options
pollFrequency string

Default value is : 1.000000000

Default: 1.0
format=duration
timeout string
format=duration
wait boolean

Default value is false

Default value is : false

Default: false
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.amqp.Consume object

Requires maxDuration or maxRecords.

##### Examples

id: amqp_consume
namespace: company.team

tasks:
  - id: consume
    type: io.kestra.plugin.amqp.Consume
    url: amqp://guest:guest@localhost:5672/my_vhost
    queue: kestramqp.queue
    maxRecords: 1000

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
queue string required
type const: "io.kestra.plugin.amqp.Consume" required
Constant: "io.kestra.plugin.amqp.Consume"
allowFailure boolean

Default value is : false

Default: false
consumerTag string

Default value is : Kestra

Default: "Kestra"
description string
disabled boolean

Default value is : false

Default: false
host string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

password string
port string
serdeType string

Default value is : STRING

Default: "STRING"
Values: "STRING" "JSON"
timeout string
format=duration
username string
virtualHost string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.amqp.CreateQueue object

Create a queue, including specified arguments.

##### Examples

id: amqp_create_queue
namespace: company.team

tasks:
  - id: create_queue
    type: io.kestra.plugin.amqp.CreateQueue
    url: amqp://guest:guest@localhost:5672/my_vhost
    name: kestramqp.queue

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
type const: "io.kestra.plugin.amqp.CreateQueue" required
Constant: "io.kestra.plugin.amqp.CreateQueue"
allowFailure boolean

Default value is : false

Default: false
args object
autoDelete boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
durability boolean

Default value is : true

Default: true
exclusive boolean

Default value is : false

Default: false
host string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
port string
timeout string
format=duration
username string
virtualHost string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.amqp.DeclareExchange object
Examples
id: amqp_declare_exchange
namespace: company.team

tasks:
  - id: declare_exchange
    type: io.kestra.plugin.amqp.DeclareExchange
    url: amqp://guest:guest@localhost:5672/my_vhost
    name: kestramqp.exchange

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
type const: "io.kestra.plugin.amqp.DeclareExchange" required
Constant: "io.kestra.plugin.amqp.DeclareExchange"
allowFailure boolean

Default value is : false

Default: false
args object
autoDelete boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
durability boolean

Default value is : true

Default: true
exchangeType string

Default value is : DIRECT

Default: "DIRECT"
Values: "DIRECT" "FANOUT" "TOPIC" "HEADERS"
host string
internal boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
port string
timeout string
format=duration
username string
virtualHost string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.amqp.Publish object
Examples
id: amqp_publish
namespace: company.team

tasks:
  - id: publish
    type: io.kestra.plugin.amqp.Publish
    url: amqp://guest:guest@localhost:5672/my_vhost
    exchange: kestramqp.exchange
    from:
      - data: value-1
        headers:
            testHeader: KestraTest
        timestamp: '2023-01-09T08:46:33.103130753Z'
      - data: value-2
        timestamp: '2023-01-09T08:46:33.115456977Z'
        appId: unit-kestra

exchange string required
from string | array required

It can be a Kestra's internal storage URI or a list.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.amqp.Publish" required
Constant: "io.kestra.plugin.amqp.Publish"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
host string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
port string
routingKey string
serdeType string

Default value is : STRING

Default: "STRING"
Values: "STRING" "JSON"
timeout string
format=duration
username string
virtualHost string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.amqp.QueueBind object
Examples
id: amqp_queue_bind
namespace: company.team

tasks:
  - id: queue_bind
    type: io.kestra.plugin.amqp.QueueBind
    url: amqp://guest:guest@localhost:5672/my_vhost
    exchange: kestramqp.exchange
    queue: kestramqp.queue

exchange string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
queue string required
type const: "io.kestra.plugin.amqp.QueueBind" required
Constant: "io.kestra.plugin.amqp.QueueBind"
allowFailure boolean

Default value is : false

Default: false
args object
description string
disabled boolean

Default value is : false

Default: false
host string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
port string
routingKey string
timeout string
format=duration
username string
virtualHost string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.amqp.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.amqp.Trigger instead.

##### Examples

Consume a message from an AMQP queue in real-time.

id: amqp
namespace: company.team

tasks:
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.data }}"

triggers:
  - id: realtime_trigger
    type: io.kestra.plugin.amqp.RealtimeTrigger
    url: amqp://guest:guest@localhost:5672/my_vhost
    queue: amqpTrigger.queue

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
queue string required
type const: "io.kestra.plugin.amqp.RealtimeTrigger" required
Constant: "io.kestra.plugin.amqp.RealtimeTrigger"
conditions array
consumerTag string

Default value is : Kestra

Default: "Kestra"
description string
disabled boolean

Default value is : false

Default: false
host string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
port string
serdeType string

Default value is : STRING

Default: "STRING"
Values: "STRING" "JSON"
stopAfter string[]
username string
virtualHost string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.amqp.Trigger object

Note that you don't need an extra task to consume the message from the event trigger. The trigger will automatically consume messages and you can retrieve their content in your flow using the {{ trigger.uri }} variable. If you would like to consume each message from an AMQP queue in real-time and create one execution per message, you can use the io.kestra.plugin.amqp.RealtimeTrigger instead.

##### Examples

id: amqp_trigger
namespace: company.team

tasks:
  - id: trigger
    type: io.kestra.plugin.amqp.Trigger
    url: amqp://guest:guest@localhost:5672/my_vhost
    maxRecords: 2
    queue: amqpTrigger.queue

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
queue string required
type const: "io.kestra.plugin.amqp.Trigger" required
Constant: "io.kestra.plugin.amqp.Trigger"
conditions array
consumerTag string

Default value is : Kestra

Default: "Kestra"
description string
disabled boolean

Default value is : false

Default: false
host string
interval string

The interval between 2 different polls of schedule; this can avoid overloading the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

password string
port string
serdeType string

Default value is : STRING

Default: "STRING"
Values: "STRING" "JSON"
stopAfter string[]
username string
virtualHost string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.ansible.cli.AnsibleCLI object
Examples

Execute a list of Ansible CLI commands to orchestrate an Ansible playbook stored in the Editor using Namespace Files.

id: ansible
namespace: company.team

tasks:
  - id: ansible_task
    type: io.kestra.plugin.ansible.cli.AnsibleCLI
    inputFiles:
      inventory.ini: "{{ read('inventory.ini') }}"
      myplaybook.yml: "{{ read('myplaybook.yml') }}"
    docker:
      image: cytopia/ansible:latest-tools
    commands:
      - ansible-playbook -i inventory.ini myplaybook.yml

Execute a list of Ansible CLI commands to orchestrate an Ansible playbook defined inline in the flow definition.

id: ansible
namespace: company.team

tasks:
  - id: ansible_task
    type: io.kestra.plugin.ansible.cli.AnsibleCLI
    inputFiles:
      inventory.ini: |
        localhost ansible_connection=local
      myplaybook.yml: |
        ---
        - hosts: localhost
          tasks:
            - name: Print Hello World
              debug:
                msg: "Hello, World!"
    docker:
      image: cytopia/ansible:latest-tools
    commands:
      - ansible-playbook -i inventory.ini myplaybook.yml
commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.ansible.cli.AnsibleCLI" required
Constant: "io.kestra.plugin.ansible.cli.AnsibleCLI"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : cytopia/ansible:latest-tools

Default: "cytopia/ansible:latest-tools"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
env Record<string, string>
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.athena.Query object

The query will wait for completion, except if fetchMode is set to NONE, and will output converted rows. Row conversion is based on the types listed here. Complex data types like array, map and struct will be converted to a string.

##### Examples

id: aws_athena_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.aws.athena.Query
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    database: my_database
    outputLocation: s3://some-s3-bucket
    query: |
      select * from cloudfront_logs limit 10

database string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
outputLocation string required

The query results will be stored in this output location. Must be an existing S3 bucket.

query string required
type const: "io.kestra.plugin.aws.athena.Query" required
Constant: "io.kestra.plugin.aws.athena.Query"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
catalog string
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

fetchType string

FETCH_ONE outputs the first row, FETCH outputs all the rows, STORE stores all rows in a file, NONE does nothing — in this case, the task submits the query without waiting for its completion.

Default value is : STORE

Default: "STORE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

skipHeader boolean

Default value is : true

Default: true
stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.cli.AwsCLI object
Examples

Run a simple AWS CLI command and capture the output.

id: aws_cli
namespace: company.team
tasks:
  - id: cli
    type: io.kestra.plugin.aws.cli.AwsCLI
    accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID') }}"
    secretKeyId: "{{ secret('AWS_SECRET_ACCESS_KEY') }}"
    region: "us-east-1"
    commands:
      - aws sts get-caller-identity | tr -d ' \n' | xargs -0 -I {} echo '::{"outputs":{}}::'

Create a simple S3 bucket.

id: aws_cli
namespace: company.team

tasks:
  - id: cli
    type: io.kestra.plugin.aws.cli.AwsCLI
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    commands:
      - aws s3 mb s3://test-bucket

List all S3 buckets as the task's output.

id: aws_cli
namespace: company.team

tasks:
  - id: cli
    type: io.kestra.plugin.aws.cli.AwsCLI
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    commands:
      - aws s3api list-buckets | tr -d ' \n' | xargs -0 -I {} echo '::{"outputs":{}}::'

commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.aws.cli.AwsCLI" required
Constant: "io.kestra.plugin.aws.cli.AwsCLI"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
containerImage string

Default value is : amazon/aws-cli

Default: "amazon/aws-cli"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

env Record<string, string>
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

outputFormat string

Default value is : JSON

Default: "JSON"
Values: "JSON" "TEXT" "TABLE" "YAML"
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.dynamodb.DeleteItem object
Examples

Delete an item by its key.

id: aws_dynamodb_delete_item
namespace: company.team

tasks:
  - id: delete_item
    type: io.kestra.plugin.aws.dynamodb.DeleteItem
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    tableName: "persons"
    key:
       id: "1"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
tableName string required
type const: "io.kestra.plugin.aws.dynamodb.DeleteItem" required
Constant: "io.kestra.plugin.aws.dynamodb.DeleteItem"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

key object

The DynamoDB item identifier.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.dynamodb.GetItem object
Examples

Get an item by its key.

id: aws_dynamodb_get_item
namespace: company.team

tasks:
  - id: get_item
    type: io.kestra.plugin.aws.dynamodb.GetItem
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    tableName: "persons"
    key:
       id: "1"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
tableName string required
type const: "io.kestra.plugin.aws.dynamodb.GetItem" required
Constant: "io.kestra.plugin.aws.dynamodb.GetItem"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

key object

The DynamoDB item identifier.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.dynamodb.PutItem object
Examples

Put an item in map form into a table.

id: aws_dynamodb_put_item
namespace: company.team

tasks:
  - id: put_item
    type: io.kestra.plugin.aws.dynamodb.PutItem
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    tableName: "persons"
    item:
      id: 1
      firstname: "John"
      lastname: "Doe"

Put an item in JSON string form into a table.

id: aws_dynamodb_put_item
namespace: company.team

tasks:
  - id: put_item
    type: io.kestra.plugin.aws.dynamodb.PutItem
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    tableName: "persons"
    item: "{{ outputs.task_id.data | json }}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
tableName string required
type const: "io.kestra.plugin.aws.dynamodb.PutItem" required
Constant: "io.kestra.plugin.aws.dynamodb.PutItem"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

item

The item can be in the form of a JSON string, or a map.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.dynamodb.Query object
Examples

Query items from a table.

    id: aws_dynamo_db_query
    namespace: company.team

    tasks:
      - id: query
        type: io.kestra.plugin.aws.dynamodb.Query
        accessKeyId: "<access-key>"
        secretKeyId: "<secret-key>"
        region: "eu-central-1"
        tableName: "persons"
        keyConditionExpression: id = :id
        expressionAttributeValues:
          :id: "1"

Query items from a table with a filter expression.

id: aws_dynamo_db_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.aws.dynamodb.Query
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    tableName: "persons"
    keyConditionExpression: id = :id
    expressionAttributeValues:
      :id: "1"
      :lastname: "Doe"

expressionAttributeValues object required

It's a map of string -> object.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
keyConditionExpression string required
tableName string required
type const: "io.kestra.plugin.aws.dynamodb.Query" required
Constant: "io.kestra.plugin.aws.dynamodb.Query"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

fetchType string

FETCH_ONE outputs the first row, FETCH outputs all rows, STORE stores all rows in a file, and NONE does nothing.

Default value is : STORE

Default: "STORE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
filterExpression string

Query filter expression.

limit integer
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.dynamodb.Scan object
Examples

Scan all items from a table.

id: aws_dynamo_db_scan
namespace: company.team

tasks:
  - id: scan
    type: io.kestra.plugin.aws.dynamodb.Scan
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    tableName: "persons"

Scan items from a table with a filter expression.

id: aws_dynamo_db_scan
namespace: company.team

tasks:
  - id: scan
    type: io.kestra.plugin.aws.dynamodb.Scan
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    tableName: "persons"
    filterExpression: "lastname = :lastname"
    expressionAttributeValues:
      :lastname: "Doe"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
tableName string required
type const: "io.kestra.plugin.aws.dynamodb.Scan" required
Constant: "io.kestra.plugin.aws.dynamodb.Scan"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

expressionAttributeValues object

It's a map of string -> object.

fetchType string

FETCH_ONE outputs the first row, FETCH outputs all rows, STORE stores all rows in a file, and NONE does nothing.

Default value is : STORE

Default: "STORE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
filterExpression string

When used, expressionAttributeValues property must also be provided.

limit integer
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.ecr.GetAuthToken object
Examples

Retrieve the AWS ECR authorization token.

id: aws_ecr_get_auth_token
namespace: company.team

tasks:
  - id: get_auth_token
    type: io.kestra.plugin.aws.ecr.GetAuthToken
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.aws.ecr.GetAuthToken" required
Constant: "io.kestra.plugin.aws.ecr.GetAuthToken"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.eventbridge.PutEvents object
Examples

Send multiple custom events as maps to Amazon EventBridge so that they can be matched to rules.

id: aws_event_bridge_put_events
namespace: company.team

tasks:
  - id: put_events
    type: io.kestra.plugin.aws.eventbridge.PutEvents
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    entries:
      - eventBusName: "events"
        source: "Kestra"
        detailType: "my_object"
        detail:
          message: "hello from EventBridge and Kestra"

Send multiple custom events as a JSON string to Amazon EventBridge so that they can be matched to rules.

id: aws_event_bridge_put_events
namespace: company.team

tasks:
  - id: put_events
    type: io.kestra.plugin.aws.eventbridge.PutEvents
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    entries:
      - eventBusName: "events"
        source: "Kestra"
        detailType: "my_object"
        detail: '{"message": "hello from EventBridge and Kestra"}'
        resources:
          - "arn:aws:iam::123456789012:user/johndoe"

entries

A list of at least one EventBridge entry.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.aws.eventbridge.PutEvents" required
Constant: "io.kestra.plugin.aws.eventbridge.PutEvents"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

failOnUnsuccessfulEvents boolean

If true, the task will fail when any event fails to be sent.

Default value is : true

Default: true
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.eventbridge.model.Entry object
detailType string required
eventBusName string required
source string required
detail

Can be a JSON string, or a map.

resources string[]

AWS resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.

io.kestra.plugin.aws.kinesis.PutRecords object
Examples

Send multiple records as maps to Amazon Kinesis Data Streams. Check the following AWS API reference for the structure of the PutRecordsRequestEntry request payload.

id: aws_kinesis_put_records
namespace: company.team

tasks:
  - id: put_records
    type: io.kestra.plugin.aws.kinesis.PutRecords
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    streamName: "mystream"
    records:
      - data: "user sign-in event"
        explicitHashKey: "optional hash value overriding the partition key"
        partitionKey: "user1"
      - data: "user sign-out event"
        partitionKey: "user1"

Send multiple records from an internal storage Ion file to Amazon Kinesis Data Streams.

id: aws_kinesis_put_records
namespace: company.team

tasks:
  - id: put_records
    type: io.kestra.plugin.aws.kinesis.PutRecords
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    streamName: "mystream"
    records: kestra:///myfile.ion

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*

records

A list of at least one record with a map including data and partitionKey properties (those two are required arguments). Check the PutRecordsRequestEntry API reference for a detailed description of required fields.

type const: "io.kestra.plugin.aws.kinesis.PutRecords" required
Constant: "io.kestra.plugin.aws.kinesis.PutRecords"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

failOnUnsuccessfulRecords boolean

If true, the task will fail when any record fails to be sent.

Default value is : true

Default: true
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

streamArn string

Make sure to set either streamName or streamArn. One of those must be provided.

streamName string

Make sure to set either streamName or streamArn. One of those must be provided.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.kinesis.model.Record object
data string required
partitionKey string required
explicitHashKey string
io.kestra.plugin.aws.lambda.Invoke object
Examples

Invoke given Lambda function and wait for its completion.

id: aws_lambda_invoke
namespace: company.team

tasks:
  - id: invoke
    type: io.kestra.plugin.aws.lambda.Invoke
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    functionArn: "arn:aws:lambda:eu-central-1:123456789012:function:my-function"

Invoke given Lambda function with given payload parameters and wait for its completion. Payload is a map of items.

id: aws_lambda_invoke
namespace: company.team

tasks:
  - id: invoke
    type: io.kestra.plugin.aws.lambda.Invoke
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    functionArn: "arn:aws:lambda:eu-central-1:123456789012:function:my-function"
    functionPayload:
        id: 1
        firstname: "John"
        lastname: "Doe"

functionArn string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.aws.lambda.Invoke" required
Constant: "io.kestra.plugin.aws.lambda.Invoke"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

functionPayload object

Request payload. It's a map of string -> object.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.s3.Copy object
Examples
id: aws_s3_copy
namespace: company.team

tasks:
  - id: copy
    type: io.kestra.plugin.aws.s3.Copy
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    from:
      bucket: "my-bucket"
      key: "path/to/file"
    to:
      bucket: "my-bucket2"
      key: "path/to/file2"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.aws.s3.Copy" required
Constant: "io.kestra.plugin.aws.s3.Copy"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
delete boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

from
All of: io.kestra.plugin.aws.s3.Copy-CopyObjectFrom object, The source bucket and key.
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
to
All of: io.kestra.plugin.aws.s3.Copy-CopyObject object, The destination bucket and key.
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.s3.Copy-CopyObject object
bucket string required
key string required
io.kestra.plugin.aws.s3.Copy-CopyObjectFrom object
bucket string required
key string required
versionId string
io.kestra.plugin.aws.s3.CreateBucket object
Examples

Create a new bucket with some options.

    id: aws_s3_create_bucket
    namespace: company.team

    tasks:
      - id: create_bucket
        type: io.kestra.plugin.aws.s3.CreateBucket
        accessKeyId: "<access-key>"
        secretKeyId: "<secret-key>"
        region: "eu-central-1"
        bucket: "my-bucket"

bucket string required

The S3 bucket name to create.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.aws.s3.CreateBucket" required
Constant: "io.kestra.plugin.aws.s3.CreateBucket"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

acl string
allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

grantFullControl string

Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.

grantRead string
grantReadACP string
grantWrite string
grantWriteACP string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
objectLockEnabledForBucket boolean
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.s3.Delete object
Examples
id: aws_s3_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.aws.s3.Delete
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"
    key: "path/to/file"

bucket string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
key string required
type const: "io.kestra.plugin.aws.s3.Delete" required
Constant: "io.kestra.plugin.aws.s3.Delete"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
bypassGovernanceRetention boolean
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
mfa string

Required to permanently delete a versioned object if versioning is configured with MFA delete enabled.

region string
requestPayer string

Sets the value of the RequestPayer property for this object.

secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.s3.DeleteList object
Examples
id: aws_s3_delete_list
namespace: company.team

tasks:
  - id: delete_list
    type: io.kestra.plugin.aws.s3.DeleteList
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"
    prefix: "sub-dir"

bucket string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.aws.s3.DeleteList" required
Constant: "io.kestra.plugin.aws.s3.DeleteList"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
concurrent integer
min=2
delimiter string
description string
disabled boolean

Default value is : false

Default: false
encodingType string
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

errorOnEmpty boolean

Default value is : false

Default: false
expectedBucketOwner string

If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

filter string

Default value is : BOTH

Default: "BOTH"
Values: "FILES" "DIRECTORY" "BOTH"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
marker string

Amazon S3 starts listing after this specified key. Marker can be any key in the bucket.

maxKeys integer

By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

Default value is : 1000

Default: 1000
prefix string
regexp string

Examples: `regExp: .*` to match all files; `regExp: .*2020-01-0.\\.csv` to match files between 01 and 09 of January ending with `.csv`.

region string
requestPayer string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.s3.Download object
Examples
id: aws_s3_download
namespace: company.team

tasks:
  - id: download
    type: io.kestra.plugin.aws.s3.Download
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"
    key: "path/to/file"

bucket string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
key string required
type const: "io.kestra.plugin.aws.s3.Download" required
Constant: "io.kestra.plugin.aws.s3.Download"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
requestPayer string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
versionId string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.s3.Downloads object
Examples
id: aws_s3_downloads
namespace: company.team

tasks:
  - id: downloads
    type: io.kestra.plugin.aws.s3.Downloads
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"
    prefix: "sub-dir"

action string required
Values: "MOVE" "DELETE" "NONE"
bucket string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.aws.s3.Downloads" required
Constant: "io.kestra.plugin.aws.s3.Downloads"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean

Default value is : false

Default: false
delimiter string
description string
disabled boolean

Default value is : false

Default: false
encodingType string
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

expectedBucketOwner string

If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

filter string

Default value is : BOTH

Default: "BOTH"
Values: "FILES" "DIRECTORY" "BOTH"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
marker string

Amazon S3 starts listing after this specified key. Marker can be any key in the bucket.

maxKeys integer

By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

Default value is : 1000

Default: 1000
moveTo
All of: io.kestra.plugin.aws.s3.Copy-CopyObject object, The destination bucket and key for `MOVE` action.
prefix string
regexp string

e.g., regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

region string
requestPayer string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.s3.List object
Examples
id: aws_s3_list
namespace: company.team

tasks:
  - id: list
    type: io.kestra.plugin.aws.s3.List
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"
    prefix: "sub-dir"

bucket string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.aws.s3.List" required
Constant: "io.kestra.plugin.aws.s3.List"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
delimiter string
description string
disabled boolean

Default value is : false

Default: false
encodingType string
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

expectedBucketOwner string

If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

filter string

Default value is : BOTH

Default: "BOTH"
Values: "FILES" "DIRECTORY" "BOTH"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
marker string

Amazon S3 starts listing after this specified key. Marker can be any key in the bucket.

maxKeys integer

By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

Default value is : 1000

Default: 1000
prefix string
regexp string

e.g., regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

region string
requestPayer string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.s3.Trigger object

This trigger polls the S3 bucket at the configured interval. You can search for all files in a bucket or in a directory using from, or you can filter the files with a regExp. The detection is atomic: internally, we list the bucket and interact only with the files listed. Once a file is detected, it is downloaded to internal storage and processed with the declared action in order to move or delete it from the bucket (to avoid double detection on the next poll).

Examples

Wait for a list of files on an S3 bucket and iterate through the files.

id: s3_listen
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value }}"
    value: "{{ trigger.objects | jq('.[].uri') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.aws.s3.Trigger
    interval: "PT5M"
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"
    prefix: "sub-dir"
    action: MOVE
    moveTo:
      key: archive
      bucket: "new-bucket"

Wait for a list of files on an S3 bucket and iterate through the files. Delete files manually after processing to prevent infinite triggering.

    id: s3_listen
    namespace: company.team

    tasks:
      - id: each
        type: io.kestra.plugin.core.flow.EachSequential
        tasks:
          - id: return
            type: io.kestra.plugin.core.debug.Return
            format: "{{ taskrun.value }}"

          - id: delete
            type: io.kestra.plugin.aws.s3.Delete
            accessKeyId: "<access-key>"
            secretKeyId: "<secret-key>"
            region: "eu-central-1"
            bucket: "my-bucket"
            key: "{{ taskrun.value }}"
        value: "{{ trigger.objects | jq('.[].key') }}"

    triggers:
      - id: watch
        type: io.kestra.plugin.aws.s3.Trigger
        interval: "PT5M"
        accessKeyId: "<access-key>"
        secretKeyId: "<secret-key>"
        region: "eu-central-1"
        bucket: "my-bucket"
        prefix: "sub-dir"
        action: NONE

action string required
Values: "MOVE" "DELETE" "NONE"
bucket string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.aws.s3.Trigger" required
Constant: "io.kestra.plugin.aws.s3.Trigger"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

conditions array
delimiter string
description string
disabled boolean

Default value is : false

Default: false
encodingType string
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

expectedBucketOwner string

If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

filter string

Default value is : BOTH

Default: "BOTH"
Values: "FILES" "DIRECTORY" "BOTH"
interval string

The interval between two consecutive polls of the schedule; this can prevent overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval must be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
marker string

Amazon S3 starts listing after this specified key. Marker can be any key in the bucket.

maxKeys integer

By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

Default value is : 1000

Default: 1000
moveTo
All of: io.kestra.plugin.aws.s3.Copy-CopyObject object, The destination bucket and key for `MOVE` action.
prefix string
regexp string

e.g., regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

region string
requestPayer string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stopAfter string[]
stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.s3.Upload object
Examples
id: aws_s3_upload
namespace: company.team

inputs:
  - id: myfile
    type: FILE

tasks:
  - id: upload
    type: io.kestra.plugin.aws.s3.Upload
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    from: "{{ inputs.myfile }}"
    bucket: "my-bucket"
    key: "path/to/file"

bucket string required
from io.kestra.plugin.aws.s3.List | string required

Can be a single file, a list of files, or a JSON array.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
key string required

A full key (with filename), or the directory path if from contains multiple files.

type const: "io.kestra.plugin.aws.s3.Upload" required
Constant: "io.kestra.plugin.aws.s3.Upload"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

acl string
allowFailure boolean

Default value is : false

Default: false
bucketKeyEnabled boolean

Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

cacheControl string
checksum string

Must be used in pair with checksumAlgorithm to define the expected algorithm of these values.

checksumAlgorithm string
Values: "CRC32" "CRC32_C" "SHA1" "SHA256" "UNKNOWN_TO_SDK_VERSION"
compatibilityMode boolean

Default value is : false

Default: false
contentDisposition string
contentEncoding string

Specifies what decoding mechanisms must be applied to obtain the media type referenced by the Content-Type header field.

contentLanguage string
contentLength integer

This parameter is useful when the size of the body cannot be determined automatically.

contentType string
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

expectedBucketOwner string

If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

expires string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
metadata object
objectLockLegalHoldStatus string
Values: "ON" "OFF" "UNKNOWN_TO_SDK_VERSION"
objectLockMode string
Values: "GOVERNANCE" "COMPLIANCE" "UNKNOWN_TO_SDK_VERSION"
objectLockRetainUntilDate string
region string
requestPayer string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

serverSideEncryption string

For example, AES256, aws:kms, aws:kms:dsse

Values: "AES256" "AWS_KMS" "AWS_KMS_DSSE" "UNKNOWN_TO_SDK_VERSION"
sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

storageClass string
Values: "STANDARD" "REDUCED_REDUNDANCY" "STANDARD_IA" "ONEZONE_IA" "INTELLIGENT_TIERING" "GLACIER" "DEEP_ARCHIVE" "OUTPOSTS" "GLACIER_IR" "SNOW" "EXPRESS_ONEZONE" "UNKNOWN_TO_SDK_VERSION"
stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

tagging object
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.sns.Publish object
Examples
id: aws_sns_publish
namespace: company.team

tasks:
  - id: publish
    type: io.kestra.plugin.aws.sns.Publish
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    topicArn: "arn:aws:sns:eu-central-1:000000000000:MessageTopic"
    from:
    - data: Hello World
    - data: Hello Kestra
      subject: Kestra

from required

Can be an internal storage URI, a list of SNS messages, or a single SNS message.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
topicArn string required
type const: "io.kestra.plugin.aws.sns.Publish" required
Constant: "io.kestra.plugin.aws.sns.Publish"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.sqs.Consume object

Requires maxDuration or maxRecords.

Examples

id: aws_sqs_consume
namespace: company.team

tasks:
  - id: consume
    type: io.kestra.plugin.aws.sqs.Consume
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    queueUrl: "https://sqs.eu-central-1.amazonaws.com/000000000000/test-queue"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
queueUrl string required
type const: "io.kestra.plugin.aws.sqs.Consume" required
Constant: "io.kestra.plugin.aws.sqs.Consume"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string
format=duration
maxRecords integer
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

serdeType string

Default value is : STRING

Default: "STRING"
Values: "STRING" "JSON"
sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.sqs.Publish object
Examples
id: aws_sqs_publish
namespace: company.team

tasks:
  - id: publish
    type: io.kestra.plugin.aws.sqs.Publish
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    queueUrl: "https://sqs.eu-central-1.amazonaws.com/000000000000/test-queue"
    from:
    - data: Hello World
    - data: Hello Kestra
      delaySeconds: 5

from string | array | io.kestra.plugin.aws.sqs.model.Message required

Can be an internal storage URI, a list of SQS messages, or a single SQS message.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
queueUrl string required
type const: "io.kestra.plugin.aws.sqs.Publish" required
Constant: "io.kestra.plugin.aws.sqs.Publish"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

allowFailure boolean

Default value is : false

Default: false
compatibilityMode boolean
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.sqs.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.aws.sqs.Trigger instead.

Examples


Consume a message from an SQS queue in real-time.

id: sqs
namespace: company.team

tasks:
- id: log
  type: io.kestra.plugin.core.log.Log
  message: "{{ trigger.data }}"

triggers:
- id: realtime_trigger
  type: io.kestra.plugin.aws.sqs.RealtimeTrigger
  accessKeyId: "access_key"
  secretKeyId: "secret_key"
  region: "eu-central-1"
  queueUrl: https://sqs.eu-central-1.amazonaws.com/000000000000/test-queue
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
queueUrl string required
type const: "io.kestra.plugin.aws.sqs.RealtimeTrigger" required
Constant: "io.kestra.plugin.aws.sqs.RealtimeTrigger"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

clientRetryMaxAttempts integer

Default value is : 3

Default: 3
conditions array
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxNumberOfMessage integer

Increasing this value can reduce the number of requests made to SQS. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values: 1 to 10.

Default value is : 5

Default: 5
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

serdeType string

Default value is : STRING

Default: "STRING"
Values: "STRING" "JSON"
sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stopAfter string[]
stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

waitTime string

Default value is : 20.000000000

Default: 20.0
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.sqs.Trigger object

Requires maxDuration or maxRecords. Note that you don't need an extra task to consume the message from the event trigger. The trigger will automatically consume messages, and you can retrieve their content in your flow using the {{ trigger.uri }} variable. If you would like to consume each message from an SQS queue in real-time and create one execution per message, you can use the io.kestra.plugin.aws.sqs.RealtimeTrigger instead.

Examples

id: sqs
namespace: company.team

tasks:
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.data }}"

triggers:
  - id: trigger
    type: io.kestra.plugin.aws.sqs.Trigger
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    queueUrl: "https://sqs.eu-central-1.amazonaws.com/000000000000/test-queue"
    maxRecords: 10

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
queueUrl string required
type const: "io.kestra.plugin.aws.sqs.Trigger" required
Constant: "io.kestra.plugin.aws.sqs.Trigger"
accessKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

conditions array
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string

This property allows you to use a different S3 compatible storage backend.

interval string

The interval between two consecutive polls of the schedule; this can prevent overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval must be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string
format=duration
maxRecords integer
region string
secretKeyId string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

serdeType string

Default value is : STRING

Default: "STRING"
Values: "STRING" "JSON"
sessionToken string

If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stopAfter string[]
stsEndpointOverride string
stsRoleArn string

The Amazon Resource Name (ARN) of the role to assume. If set the task will use the StsAssumeRoleCredentialsProvider. If no credentials are defined, we will use the default credentials provider chain to fetch credentials.

stsRoleExternalId string

A unique identifier that might be required when you assume a role in another account. This property is only used when an stsRoleArn is defined.

stsRoleSessionDuration string

The duration of the role session (default: 15 minutes, i.e., PT15M). This property is only used when an stsRoleArn is defined.

Default value is : 900.000000000

Default: 900.0
format=duration
stsRoleSessionName string

This property is only used when an stsRoleArn is defined.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.aws.sqs.model.Message object
data string required
deduplicationId string
delaySeconds integer
groupId string
io.kestra.plugin.azure.batch.job.Create object
Examples
id: azure_batch_job_create
namespace: company.team

tasks:
  - id: create
    type: io.kestra.plugin.azure.batch.job.Create
    endpoint: https://***.francecentral.batch.azure.com
    account: <batch-account>
    accessKey: <access-key>
    poolId: <pool-id>
    job:
      id: <job-name>
    tasks:
      - id: env
        commands:
          - 'echo t1=$ENV_STRING'
        environments:
          ENV_STRING: "{{ inputs.first }}"

      - id: echo
        commands:
          - 'echo t2={{ inputs.second }} 1>&2'

      - id: for
        commands:
          -  'for i in $(seq 10); do echo t3=$i; done'

      - id: vars
        commands:
          - echo '::{"outputs":{"extract":"'$(cat files/in/in.txt)'"}::'
        resourceFiles:
          - httpUrl: https://unittestkt.blob.core.windows.net/tasks/***?sv=***&se=***&sr=***&sp=***&sig=***
            filePath: files/in/in.txt

      - id: output
        commands:
          - 'mkdir -p outs/child/sub'
          - 'echo 1 > outs/1.txt'
          - 'echo 2 > outs/child/2.txt'
          - 'echo 3 > outs/child/sub/3.txt'
        outputFiles:
          - outs/1.txt
        outputDirs:
          - outs/child

Use a container to start the task, the pool must use a microsoft-azure-batch publisher.

id: azure_batch_job_create
namespace: company.team

tasks:
  - id: create
    type: io.kestra.plugin.azure.batch.job.Create
    endpoint: https://***.francecentral.batch.azure.com
    account: <batch-account>
    accessKey: <access-key>
    poolId: <pool-id>
    job:
      id: <job-name>
    tasks:
      - id: echo
        commands:
          - 'python --version'
        containerSettings:
          imageName: python

endpoint string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
job required
All of: io.kestra.plugin.azure.batch.models.Job object, The job to create.
poolId string required
tasks array required
type const: "io.kestra.plugin.azure.batch.job.Create" required
Constant: "io.kestra.plugin.azure.batch.job.Create"
accessKey string
account string
allowFailure boolean

Default value is : false

Default: false
completionCheckInterval string

Default value is : 1.000000000

Default: 1.0
format=duration
delete boolean

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

If null, there is no timeout and the task is delegated to Azure Batch.

format=duration
resume boolean

Default value is : true

Default: true
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.batch.models.ComputeNodeIdentityReference object
resourceId string
io.kestra.plugin.azure.batch.models.ContainerRegistry object
identityReference
All of: io.kestra.plugin.azure.batch.models.ComputeNodeIdentityReference object, The reference to the user assigned identity to use to access the Azure Container Registry instead of username and password.
password string
registryServer string

If omitted, the default is "docker.io".

userName string
io.kestra.plugin.azure.batch.models.Job object
id string required

The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case).

maxLength=64
displayName string

The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.

maxLength=1024
labels object
maxParallelTasks integer

The value of maxParallelTasks must be -1 or greater than 0, if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API.

priority integer

Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0.

io.kestra.plugin.azure.batch.models.OutputFile object
destination required
All of: io.kestra.plugin.azure.batch.models.OutputFileDestination object, The destination for the output file(s).
filePattern string

Both relative and absolute paths are supported. Relative paths are relative to the Task working directory. The following wildcards are supported: * matches 0 or more characters (for example, pattern abc* would match abc or abcdef), ** matches any directory, ? matches any single character, [abc] matches one character in the brackets, and [a-c] matches one character in the range. Brackets can include a negation to match any character not specified (for example, [!abc] matches any character but a, b, or c). If a file name starts with "." it is ignored by default but may be matched by specifying it explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple example: **\*.txt matches any file that does not start in '.' and ends with .txt in the Task working directory or any subdirectory. If the filename contains a wildcard character it can be escaped using brackets (for example, abc[*] would match a file named abc*). Note that both \ and / are treated as directory separators on Windows, but only / is on Linux.Environment variables (%var% on Windows or $var on Linux) are expanded prior to the pattern being applied.

uploadOptions
All of: io.kestra.plugin.azure.batch.models.OutputFileUploadOptions object, Additional options for the upload operation, including the conditions under which to perform the upload.
io.kestra.plugin.azure.batch.models.OutputFileBlobContainerDestination object
containerUrl string required

If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container.

identityReference
All of: io.kestra.plugin.azure.batch.models.ComputeNodeIdentityReference object, The reference to the user assigned identity to use to access Azure Blob Storage specified by `containerUrl`.
path string

If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name.

io.kestra.plugin.azure.batch.models.OutputFileDestination object
container required
All of: io.kestra.plugin.azure.batch.models.OutputFileBlobContainerDestination object, A location in Azure Blob Storage to which the files are uploaded.
io.kestra.plugin.azure.batch.models.OutputFileUploadOptions object
uploadCondition string

Default value is : taskcompletion

Default: "taskcompletion"
Values: "TASK_SUCCESS" "TASK_FAILURE" "TASK_COMPLETION"
io.kestra.plugin.azure.batch.models.ResourceFile object
autoStorageContainerName string

The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive, and one of them must be specified.

blobPrefix string

Only the blobs whose names begin with the specified prefix will be downloaded. The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial file name or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded.

fileMode string

This property applies only to files being downloaded to Linux Compute Nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows Compute Node. If this property is not specified for a Linux Compute Node, then a default value of 0770 is applied to the file.

filePath string

If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the file name. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the Task's working directory (for example by using ..).

httpUrl string

The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive, and one of them must be specified. If the URL points to Azure Blob Storage, it must be readable from compute nodes. There are three ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, use a managed identity with read permission, or set the ACL for the blob or its container to allow public access.

identityReference
All of: io.kestra.plugin.azure.batch.models.ComputeNodeIdentityReference object, The reference to the user assigned identity to use to access Azure Blob Storage specified by `storageContainerUrl` or `httpUrl`.
storageContainerUrl string

The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive, and one of them must be specified. This URL must be readable and listable from compute nodes. There are three ways to get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the container, use a managed identity with read and list permissions, or set the ACL for the container to allow public access.

io.kestra.plugin.azure.batch.models.Task object
commands string[] required

For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example, using cmd /c MyCommand in Windows or /bin/sh -c MyCommand in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable.

Command will be passed as /bin/sh -c "command" by default.

id string required

The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). If not provided, a random UUID will be generated.

maxLength=64
constraints
All of: io.kestra.plugin.azure.batch.models.TaskConstraints object, The execution constraints that apply to this Task.
containerSettings
All of: io.kestra.plugin.azure.batch.models.TaskContainerSettings object, The settings for the container under which the Task runs.
displayName string

The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.

maxLength=1024
environments object
interpreter string

Default value is : /bin/sh

Default: "/bin/sh"
minLength=1
interpreterArgs string[]

Default value is : - -c

Default:
[
  "-c"
]
outputDirs string[]

List of keys that will generate temporary directories. In the command, you can use a special variable named outputDirs.key. If you add a file with ["myDir"], you can use the special variable echo 1 >> {{ outputDirs.myDir }}/file1.txt and echo 2 >> {{ outputDirs.myDir }}/file2.txt, and both files will be uploaded to the internal storage. Then, you can use them on other tasks using {{ outputs.taskId.files['myDir/file1.txt'] }}

outputFiles string[]

List of keys that will generate temporary files. In the command, you can use a special variable named outputFiles.key. If you add a file with ["first"], you can use the special variable echo 1 >> {{ outputFiles.first }} on this task, and reference this file on other tasks using {{ outputs.taskId.outputFiles.first }}.

requiredSlots integer

The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1.

resourceFiles array

For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.

uploadFiles array

For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed.

io.kestra.plugin.azure.batch.models.TaskConstraints object
maxTaskRetryCount integer

The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit.

maxWallClockTime string

If the Task does not complete within the time limit, the Batch service terminates it. If this is not specified, there is no time limit on how long the Task may run.

format=duration
retentionTime string

After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted.

format=duration
io.kestra.plugin.azure.batch.models.TaskContainerSettings object
imageName string required

This is the full Image reference, as would be specified to docker pull. If no tag is provided as part of the Image name, the tag :latest is used as a default.

containerRunOptions string

These additional options are supplied as arguments to the docker create command, in addition to those controlled by the Batch Service.

registry
All of: io.kestra.plugin.azure.batch.models.ContainerRegistry object, The private registry which contains the container image.
workingDirectory string

The default is taskWorkingDirectory. Possible values include: taskWorkingDirectory, containerImageDefault.

Values: "TASK_WORKING_DIRECTORY" "CONTAINER_IMAGE_DEFAULT"
io.kestra.plugin.azure.batch.pool.Resize object
Examples
id: azure_batch_pool_resize
namespace: company.team

tasks:
  - id: resize
    type: io.kestra.plugin.azure.batch.pool.Resize
    poolId: "<your-pool-id>"
    targetDedicatedNodes: "12"

endpoint string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
poolId string required
type const: "io.kestra.plugin.azure.batch.pool.Resize" required
Constant: "io.kestra.plugin.azure.batch.pool.Resize"
accessKey string
account string
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
targetDedicatedNodes integer

Default value is : 0

Default: 0
targetLowPriorityNodes integer

Default value is : 0

Default: 0
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.cli.AzCLI object
Examples

List Azure Active Directory users for the currently authenticated tenant.

id: azure_cli
namespace: company.team

tasks:
  - id: az_cli
    type: io.kestra.plugin.azure.cli.AzCLI
    username: "azure_app_id"
    password: "{{ secret('AZURE_SERVICE_PRINCIPAL_PASSWORD') }}"
    tenant: "{{ secret('AZURE_TENANT_ID') }}"
    commands:
      - az ad user list

List all successfully provisioned VMs using a Service Principal authentication.

id: azure_cli
namespace: company.team

tasks:
  - id: az_cli
    type: io.kestra.plugin.azure.cli.AzCLI
    username: "azure_app_id"
    password: "{{ secret('AZURE_SERVICE_PRINCIPAL_PASSWORD') }}"
    tenant: "{{ secret('AZURE_TENANT_ID') }}"
    servicePrincipal: true
    commands:
      - az vm list --query "[?provisioningState=='Succeeded']"

Command without authentication.

id: azure_cli
namespace: company.team

tasks:
  - id: az_cli
    type: io.kestra.plugin.azure.cli.AzCLI
    commands:
      - az --help

List supported regions for the current Azure subscription.

id: azure_cli
namespace: company.team

tasks:
  - id: list_locations
    type: io.kestra.plugin.azure.cli.AzCLI
    tenant: "{{ secret('AZURE_TENANT_ID') }}"
    username: "{{ secret('AZURE_SERVICE_PRINCIPAL_CLIENT_ID') }}"
    password: "{{ secret('AZURE_SERVICE_PRINCIPAL_PASSWORD') }}"
    servicePrincipal: true
    commands:
      - az account list-locations --query "[].{Region:name}" -o table
commands string[] required
minItems=1
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.azure.cli.AzCLI" required
Constant: "io.kestra.plugin.azure.cli.AzCLI"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : mcr.microsoft.com/azure-cli

Default: "mcr.microsoft.com/azure-cli"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
env Record<string, string>
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

password string
servicePrincipal boolean

Default value is : false

Default: false
taskRunner
tenant string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.datafactory.CreateRun object

Launch an Azure DataFactory pipeline from Kestra. Data Factory contains a series of interconnected systems that provide a complete end-to-end platform for data engineers.

##### Examples

id: azure_datafactory_create_run
namespace: company.team

tasks:
  - id: create_run
    type: io.kestra.plugin.azure.datafactory.CreateRun
    factoryName: exampleFactoryName
    pipelineName: examplePipeline
    resourceGroupName: exampleResourceGroup
    subscriptionId: 12345678-1234-1234-1234-12345678abc
    tenantId: "{{ secret('DATAFACTORY_TENANT_ID') }}"
    clientId: "{{ secret('DATAFACTORY_CLIENT_ID') }}"
    clientSecret: "{{ secret('DATAFACTORY_CLIENT_SECRET') }}"

id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
subscriptionId string required
tenantId string required
type const: "io.kestra.plugin.azure.datafactory.CreateRun" required
Constant: "io.kestra.plugin.azure.datafactory.CreateRun"
allowFailure boolean

Default value is : false

Default: false
clientId string

Client ID of the Azure service principal. If you don't have a service principal, refer to create a service principal with Azure CLI.

Default value is : ""

Default: ""
clientSecret string

Service principal client secret. The tenantId, clientId and clientSecret of the service principal are required for this credential to acquire an access token.

Default value is : ""

Default: ""
description string
disabled boolean

Default value is : false

Default: false
factoryName string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
parameters object

Default value is : "{}"

Default: "{}"
pemCertificate string
Your stored PEM certificate.
The tenantId, clientId and clientCertificate of the service principal are required for this credential to acquire an access token.

Default value is : ""

Default: ""
pipelineName string
resourceGroupName string
timeout string
format=duration
wait boolean | string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.eventhubs.Consume object
Examples

Consume data events from Azure EventHubs.

id: azure_eventhubs_consume_data_events
namespace: company.team

tasks:
  - id: consume_from_eventhub
    type: io.kestra.plugin.azure.eventhubs.Consume
    eventHubName: my_eventhub
    namespace: my_eventhub_namespace
    connectionString: "{{ secret('EVENTHUBS_CONNECTION') }}"
    bodyDeserializer: JSON
    consumerGroup: "$Default"
    checkpointStoreProperties:
      containerName: kestra
      connectionString: "{{ secret('BLOB_CONNECTION') }}"

eventHubName string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
namespace string required
type const: "io.kestra.plugin.azure.eventhubs.Consume" required
Constant: "io.kestra.plugin.azure.eventhubs.Consume"
allowFailure boolean

Default value is : false

Default: false
bodyDeserializer string

Default value is : STRING

Default: "STRING"
Values: "STRING" "BINARY" "ION" "JSON"
bodyDeserializerProperties object

Configs in key/value pairs.

Default value is : {}

Default:
{}
checkpointStoreProperties object

Azure Event Hubs Checkpoint Store can be used for storing checkpoints while processing events from Azure Event Hubs.

Default value is : {}

Default:
{}
clientMaxRetries integer

Default value is : 5

Default: 5
clientRetryDelay integer

Default value is : 500

Default: 500
connectionString string
consumerGroup string

Default value is : $Default

Default: "$Default"
customEndpointAddress string
description string
disabled boolean

Default value is : false

Default: false
enqueueTime string

Configs in key/value pairs.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxBatchSizePerPartition integer

Default value is : 50

Default: 50
maxDuration string

Default value is : 10.000000000

Default: 10.0
format=duration
maxWaitTimePerPartition string

Default value is : 5.000000000

Default: 5.0
format=duration
partitionStartingPosition string

Default value is : EARLIEST

Default: "EARLIEST"
Values: "EARLIEST" "LATEST" "INSTANT"
sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.eventhubs.Produce object
Examples

Publish a file as events into Azure EventHubs.

id: azure_eventhubs_send_events
namespace: company.team

inputs:
  - id: file
    type: FILE
    description: a CSV file with columns id, username, tweet, and timestamp

tasks:
  - id: read_csv_file
    type: io.kestra.plugin.serdes.csv.CsvToIon
    from: "{{ inputs.file }}"

  - id: transform_row_to_json
    type: io.kestra.plugin.scripts.nashorn.FileTransform
    from: "{{ outputs.read_csv_file.uri }}"
    script: |
      var result = {
        "body": {
          "username": row.username,
          "tweet": row.tweet
        }
      };
      row = result

  - id: send_to_eventhub
    type: io.kestra.plugin.azure.eventhubs.Produce
    from: "{{ outputs.transform_row_to_json.uri }}"
    eventHubName: my_eventhub
    namespace: my_event_hub_namespace
    connectionString: "{{ secret('EVENTHUBS_CONNECTION') }}"
    maxBatchSizeInBytes: 4096
    maxEventsPerBatch: 100
    bodySerializer: "JSON"
    bodyContentType: application/json
    eventProperties:
      source: kestra

eventHubName string required
from string | array | object required

Can be an internal storage URI, a map (i.e. a list of key-value pairs) or a list of maps. The following keys are supported: from, contentType, properties.

id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
namespace string required
type const: "io.kestra.plugin.azure.eventhubs.Produce" required
Constant: "io.kestra.plugin.azure.eventhubs.Produce"
allowFailure boolean

Default value is : false

Default: false
bodyContentType string

The MIME type describing the data contained in event body allowing consumers to make informed decisions for inspecting and processing the event.

bodySerializer string

Default value is : STRING

Default: "STRING"
Values: "STRING" "BINARY" "ION" "JSON"
bodySerializerProperties object

Configs in key/value pairs.

Default value is : {}

Default:
{}
clientMaxRetries integer

Default value is : 5

Default: 5
clientRetryDelay integer

Default value is : 500

Default: 500
connectionString string
customEndpointAddress string
description string
disabled boolean

Default value is : false

Default: false
eventProperties object

The event properties which may be used for passing metadata associated with the event body during Event Hubs operations.

Default value is : {}

Default:
{}
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxBatchSizeInBytes integer
maxEventsPerBatch integer

Default value is : 1000

Default: 1000
partitionKey string

Events with the same partitionKey are hashed and sent to the same partition. The provided partitionKey will be used for all the events sent by the Produce task.

sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.eventhubs.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.azure.eventhubs.Trigger instead.

##### Examples

Trigger flow based on events received from Azure Event Hubs in real-time.

id: azure_eventhubs_realtime_trigger
namespace: company.team

tasks:
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: Hello there! I received {{ trigger.body }} from Azure EventHubs!

triggers:
  - id: read_from_eventhub
    type: io.kestra.plugin.azure.eventhubs.RealtimeTrigger
    eventHubName: my_eventhub
    namespace: my_eventhub_namespace
    connectionString: "{{ secret('EVENTHUBS_CONNECTION') }}"
    bodyDeserializer: JSON
    consumerGroup: "$Default"
    checkpointStoreProperties:
      containerName: kestra
      connectionString: "{{ secret('BLOB_CONNECTION') }}"

eventHubName string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
namespace string required
type const: "io.kestra.plugin.azure.eventhubs.RealtimeTrigger" required
Constant: "io.kestra.plugin.azure.eventhubs.RealtimeTrigger"
bodyDeserializer string

Default value is : STRING

Default: "STRING"
Values: "STRING" "BINARY" "ION" "JSON"
bodyDeserializerProperties object

Configs in key/value pairs.

Default value is : {}

Default:
{}
checkpointStoreProperties object

Azure Event Hubs Checkpoint Store can be used for storing checkpoints while processing events from Azure Event Hubs.

Default value is : {}

Default:
{}
clientMaxRetries integer

Default value is : 5

Default: 5
clientRetryDelay integer

Default value is : 500

Default: 500
conditions array
connectionString string
consumerGroup string

Default value is : $Default

Default: "$Default"
customEndpointAddress string
description string
disabled boolean

Default value is : false

Default: false
enqueueTime string

Configs in key/value pairs.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
partitionStartingPosition string

Default value is : EARLIEST

Default: "EARLIEST"
Values: "EARLIEST" "LATEST" "INSTANT"
sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
stopAfter string[]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.eventhubs.Trigger object

If you would like to consume each message from Azure Event Hubs in real-time and create one execution per message, you can use the io.kestra.plugin.azure.eventhubs.RealtimeTrigger instead.

##### Examples

Trigger flow based on events received from Azure Event Hubs in batch.

id: azure_eventhubs_trigger
namespace: company.team

tasks:
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: Hello there! I received {{ trigger.eventsCount }} from Azure EventHubs!

triggers:
  - id: read_from_eventhub
    type: io.kestra.plugin.azure.eventhubs.Trigger
    interval: PT30S
    eventHubName: my_eventhub
    namespace: my_eventhub_namespace
    connectionString: "{{ secret('EVENTHUBS_CONNECTION') }}"
    bodyDeserializer: JSON
    consumerGroup: "$Default"
    checkpointStoreProperties:
      containerName: kestra
      connectionString: "{{ secret('BLOB_CONNECTION') }}"

eventHubName string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
namespace string required
type const: "io.kestra.plugin.azure.eventhubs.Trigger" required
Constant: "io.kestra.plugin.azure.eventhubs.Trigger"
bodyDeserializer string

Default value is : STRING

Default: "STRING"
Values: "STRING" "BINARY" "ION" "JSON"
bodyDeserializerProperties object

Configs in key/value pairs.

Default value is : {}

Default:
{}
checkpointStoreProperties object

Azure Event Hubs Checkpoint Store can be used for storing checkpoints while processing events from Azure Event Hubs.

Default value is : {}

Default:
{}
clientMaxRetries integer

Default value is : 5

Default: 5
clientRetryDelay integer

Default value is : 500

Default: 500
conditions array
connectionString string
consumerGroup string

Default value is : $Default

Default: "$Default"
customEndpointAddress string
description string
disabled boolean

Default value is : false

Default: false
enqueueTime string

Configs in key/value pairs.

interval string

The interval between two consecutive polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval should be at least PT30S. See ISO 8601 durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxBatchSizePerPartition integer

Default value is : 50

Default: 50
maxDuration string

Default value is : 10.000000000

Default: 10.0
format=duration
maxWaitTimePerPartition string

Default value is : 5.000000000

Default: 5.0
format=duration
partitionStartingPosition string

Default value is : EARLIEST

Default: "EARLIEST"
Values: "EARLIEST" "LATEST" "INSTANT"
sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
stopAfter string[]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.storage.blob.Copy object
Examples
id: azure_storage_blob_copy
namespace: company.team

tasks:
  - id: copy
    type: io.kestra.plugin.azure.storage.blob.Copy
    from:
      container: "my-bucket"
      key: "path/to/file"
    to:
      container: "my-bucket2"
      key: "path/to/file2"

endpoint string required
from required
All of: io.kestra.plugin.azure.storage.blob.Copy-CopyObject object, The source from where the file should be copied.
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
to required
All of: io.kestra.plugin.azure.storage.blob.Copy-CopyObject object, The destination to copy the file to.
type const: "io.kestra.plugin.azure.storage.blob.Copy" required
Constant: "io.kestra.plugin.azure.storage.blob.Copy"
allowFailure boolean

Default value is : false

Default: false
connectionString string
delete boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.storage.blob.Copy-CopyObject object
container string required
name string required
io.kestra.plugin.azure.storage.blob.Delete object
Examples
id: azure_storage_blob_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.azure.storage.blob.Delete
    endpoint: "https://yourblob.blob.core.windows.net"
    connectionString: "DefaultEndpointsProtocol=...=="
    container: "mydata"
    name: "myblob"

container string required
endpoint string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
type const: "io.kestra.plugin.azure.storage.blob.Delete" required
Constant: "io.kestra.plugin.azure.storage.blob.Delete"
allowFailure boolean

Default value is : false

Default: false
connectionString string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.storage.blob.DeleteList object
Examples
id: azure_storage_blob_delete_list
namespace: company.team

tasks:
  - id: delete_list
    type: io.kestra.plugin.azure.storage.blob.DeleteList
    endpoint: "https://yourblob.blob.core.windows.net"
    connectionString: "DefaultEndpointsProtocol=...=="
    container: "mydata"
    prefix: "sub-dir"
    delimiter: "/"

container string required
endpoint string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.azure.storage.blob.DeleteList" required
Constant: "io.kestra.plugin.azure.storage.blob.DeleteList"
allowFailure boolean

Default value is : false

Default: false
concurrent integer
min=2
connectionString string
delimiter string
description string
disabled boolean

Default value is : false

Default: false
errorOnEmpty boolean

Default value is : false

Default: false
filter string

Default value is : FILES

Default: "FILES"
Values: "FILES" "DIRECTORY" "BOTH"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
prefix string
regexp string

e.g., regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.storage.blob.Download object
Examples
id: azure_storage_blob_download
namespace: company.team

tasks:
  - id: download
    type: io.kestra.plugin.azure.storage.blob.Download
    endpoint: "https://yourblob.blob.core.windows.net"
    connectionString: "DefaultEndpointsProtocol=...=="
    container: "mydata"
    name: "myblob"

container string required
endpoint string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
type const: "io.kestra.plugin.azure.storage.blob.Download" required
Constant: "io.kestra.plugin.azure.storage.blob.Download"
allowFailure boolean

Default value is : false

Default: false
connectionString string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.storage.blob.Downloads object
Examples
id: azure_storage_blob_downloads
namespace: company.team

tasks:
  - id: downloads
    type: io.kestra.plugin.azure.storage.blob.Downloads
    endpoint: "https://yourblob.blob.core.windows.net"
    connectionString: "DefaultEndpointsProtocol=...=="
    container: "mydata"
    prefix: "sub-dir"
    delimiter: "/"

action string required
Values: "MOVE" "DELETE" "NONE"
container string required
endpoint string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.azure.storage.blob.Downloads" required
Constant: "io.kestra.plugin.azure.storage.blob.Downloads"
allowFailure boolean

Default value is : false

Default: false
connectionString string
delimiter string
description string
disabled boolean

Default value is : false

Default: false
filter string

Default value is : FILES

Default: "FILES"
Values: "FILES" "DIRECTORY" "BOTH"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
moveTo
All of: io.kestra.plugin.azure.storage.blob.Copy-CopyObject object, The destination container and key.
prefix string
regexp string

e.g., regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.storage.blob.List object
Examples
id: azure_storage_blob_list
namespace: company.team

tasks:
  - id: list
    type: io.kestra.plugin.azure.storage.blob.List
    endpoint: "https://yourblob.blob.core.windows.net"
    connectionString: "DefaultEndpointsProtocol=...=="
    container: "mydata"
    prefix: "sub-dir"
    delimiter: "/"

container string required
endpoint string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.azure.storage.blob.List" required
Constant: "io.kestra.plugin.azure.storage.blob.List"
allowFailure boolean

Default value is : false

Default: false
connectionString string
delimiter string
description string
disabled boolean

Default value is : false

Default: false
filter string

Default value is : FILES

Default: "FILES"
Values: "FILES" "DIRECTORY" "BOTH"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
prefix string
regexp string

e.g., regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.storage.blob.SharedAccess object
Examples
id: azure_storage_blob_shared_access
namespace: company.team

tasks:
  - id: shared_access
    type: io.kestra.plugin.azure.storage.blob.SharedAccess
    endpoint: "https://yourblob.blob.core.windows.net"
    connectionString: "DefaultEndpointsProtocol=...=="
    container: "mydata"
    name: "myblob"
    expirationDate: "{{ now() | dateAdd(1, 'DAYS') }}"
    permissions:
      - r

container string required
endpoint string required
expirationDate string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
permissions string[] required
type const: "io.kestra.plugin.azure.storage.blob.SharedAccess" required
Constant: "io.kestra.plugin.azure.storage.blob.SharedAccess"
allowFailure boolean

Default value is : false

Default: false
connectionString string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
sharedKeyAccountAccessKey string
sharedKeyAccountName string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.storage.blob.Trigger object

This trigger will poll the Azure Blob Storage container at every interval. You can search for all files in a container or directory using from, or you can filter the files with a regExp. The detection is atomic: internally, we do a list and interact only with the listed files. Once a file is detected, we download it to internal storage and process it with the declared action in order to move or delete it from the container (to avoid double detection on the next poll).

Examples

Wait for a list of files on Azure Blob Storage bucket, and then iterate through the files.

id: storage_listen
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value }}"
    value: "{{ trigger.blobs | jq('.[].uri') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.azure.storage.blob.Trigger
    interval: PT5M
    endpoint: "https://yourblob.blob.core.windows.net"
    connectionString: "DefaultEndpointsProtocol=...=="
    container: "mydata"
    prefix: "trigger/storage-listen"
    action: MOVE
    moveTo:
      container: mydata
      name: archive

Wait for a list of files on an Azure Blob Storage bucket and iterate through the files. Delete the files manually after processing to prevent infinite triggering.

id: storage_listen
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value }}"
      - id: delete
        type: io.kestra.plugin.azure.storage.blob.Delete
        endpoint: "https://yourblob.blob.core.windows.net"
        connectionString: "DefaultEndpointsProtocol=...=="
        container: "mydata"
        name: "{{ taskrun.value }}"
    value: "{{ trigger.blobs | jq('.[].name') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.azure.storage.blob.Trigger
    endpoint: "https://yourblob.blob.core.windows.net"
    connectionString: "DefaultEndpointsProtocol=...=="
    container: "mydata"
    prefix: "trigger/storage_listen"
    action: MOVE
    moveTo:
      container: mydata
      name: archive

action string required
Values: "MOVE" "DELETE" "NONE"
container string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.azure.storage.blob.Trigger" required
Constant: "io.kestra.plugin.azure.storage.blob.Trigger"
conditions array
connectionString string
delimiter string
description string
disabled boolean

Default value is : false

Default: false
endpoint string
filter string

Default value is : FILES

Default: "FILES"
Values: "FILES" "DIRECTORY" "BOTH"
interval string

The interval between two consecutive polls of the schedule, which can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval should be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
moveTo
All of: io.kestra.plugin.azure.storage.blob.Copy-CopyObject object, The destination container and key.
prefix string
regexp string

e.g., regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
stopAfter string[]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.storage.blob.Upload object
Examples
id: azure_storage_blob_upload
namespace: company.team

inputs:
  - id: myfile
    type: FILE

tasks:
  - id: upload
    type: io.kestra.plugin.azure.storage.blob.Upload
    endpoint: "https://yourblob.blob.core.windows.net"
    connectionString: "DefaultEndpointsProtocol=...=="
    container: "mydata"
    from: "{{ inputs.myfile }}"
    name: "myblob"

container string required
endpoint string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
type const: "io.kestra.plugin.azure.storage.blob.Upload" required
Constant: "io.kestra.plugin.azure.storage.blob.Upload"
accessTier string

The operation is allowed on a page blob in a premium Storage Account or a block blob in a blob Storage Account or GPV2 Account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag.

Values: "P4" "P6" "P10" "P15" "P20" "P30" "P40" "P50" "P60" "P70" "P80" "HOT" "COOL" "ARCHIVE"
allowFailure boolean

Default value is : false

Default: false
connectionString string
description string
disabled boolean

Default value is : false

Default: false
from string
immutabilityPolicy object
2 nested properties
expiryTime string
format=date-time
policyMode string
Values: "MUTABLE" "UNLOCKED" "LOCKED"
legalHold boolean

NOTE: Blob Versioning must be enabled on your storage account and the blob must be in a container with immutable storage with versioning enabled to call this API.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
metadata object
sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
tags object
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.storage.blob.models.BlobImmutabilityPolicy object
expiryTime string
format=date-time
policyMode string
Values: "MUTABLE" "UNLOCKED" "LOCKED"
io.kestra.plugin.azure.storage.table.Bulk object
Examples
id: azure_storage_table_bulk
namespace: company.team

tasks:
  - id: bulk
    type: io.kestra.plugin.azure.storage.table.Bulk
    endpoint: "https://yourstorageaccount.blob.core.windows.net"
    connectionString: "DefaultEndpointsProtocol=...=="
    table: "table_name"
    from:
      - partitionKey: "color"
        rowKey: "green"
        type: "UPSERT_MERGE"
        properties:
          "code": "00FF00"

endpoint string required
from required

Can be an internal storage URI or a list of maps in the format partitionKey, rowKey, type, properties, as shown in the example.

id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
table string required
type const: "io.kestra.plugin.azure.storage.table.Bulk" required
Constant: "io.kestra.plugin.azure.storage.table.Bulk"
allowFailure boolean

Default value is : false

Default: false
connectionString string
defaultType string

Default value is : UPSERT_REPLACE

Default: "UPSERT_REPLACE"
Values: "CREATE" "UPSERT_MERGE" "UPSERT_REPLACE" "UPDATE_MERGE" "UPDATE_REPLACE" "DELETE"
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.storage.table.Delete object
Examples
id: azure_storage_table_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.azure.storage.table.Delete
    endpoint: "https://yourstorageaccount.table.core.windows.net"
    connectionString: "DefaultEndpointsProtocol=...=="
    table: "table_name"
    partitionKey: "color"
    rowKey: "green"

endpoint string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
partitionKey string required
table string required
type const: "io.kestra.plugin.azure.storage.table.Delete" required
Constant: "io.kestra.plugin.azure.storage.table.Delete"
allowFailure boolean

Default value is : false

Default: false
connectionString string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
rowKey string
sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.storage.table.Get object
Examples
id: azure_storage_table_get
namespace: company.team

tasks:
  - id: get
    type: io.kestra.plugin.azure.storage.table.Get
    endpoint: "https://yourstorageaccount.table.core.windows.net"
    connectionString: "DefaultEndpointsProtocol=...=="
    table: "table_name"
    partitionKey: "color"
    rowKey: "green"

endpoint string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
partitionKey string required
table string required
type const: "io.kestra.plugin.azure.storage.table.Get" required
Constant: "io.kestra.plugin.azure.storage.table.Get"
allowFailure boolean

Default value is : false

Default: false
connectionString string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
rowKey string
sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

sharedKeyAccountAccessKey string
sharedKeyAccountName string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.azure.storage.table.List object

If the filter parameter in the options is set, only entities matching the filter will be returned. If the select parameter is set, only the properties included in the select parameter will be returned for each entity. If the top parameter is set, the maximum number of returned entities per page will be limited to that value.

Examples

id: azure_storage_table_list
namespace: company.team

tasks:
  - id: list
    type: io.kestra.plugin.azure.storage.table.List
    endpoint: "https://yourstorageaccount.table.core.windows.net"
    connectionString: "DefaultEndpointsProtocol=...=="
    table: "table_name"

endpoint string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
table string required
type const: "io.kestra.plugin.azure.storage.table.List" required
Constant: "io.kestra.plugin.azure.storage.table.List"
allowFailure boolean

Default value is : false

Default: false
connectionString string
description string
disabled boolean

Default value is : false

Default: false
filter string

You can specify the filter using Filter Strings.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
sasToken string

This string should only be the query parameters (with or without a leading '?') and not a full URL.

select string[]
sharedKeyAccountAccessKey string
sharedKeyAccountName string
timeout string
format=duration
top integer
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.cassandra.astradb.AstraDbSession object
clientId string required
clientSecret string required
keyspace string required
proxyAddress
All of: io.kestra.plugin.cassandra.astradb.AstraDbSession-ProxyAddress object, The Astra DB proxy address.
secureBundle string

It must be the ZIP archive containing the secure bundle encoded in base64. Use it only when you are not using the proxy address.

io.kestra.plugin.cassandra.astradb.AstraDbSession-ProxyAddress object
hostname string required
minLength=1
port integer

Default value is : 9042

Default: 9042
io.kestra.plugin.cassandra.astradb.Query object
Examples

Send a CQL query to an Astra DB.

id: cassandra_astradb_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.cassandra.astradb.Query
    session:
      secureBundle: /path/to/secureBundle.zip
      keyspace: astradb_keyspace
      clientId: astradb_clientId
      clientSecret: astradb_clientSecret
    cql: SELECT * FROM CQL_TABLE
    fetch: true

cql string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
session required
All of: io.kestra.plugin.cassandra.astradb.AstraDbSession object, The session connection properties.
type const: "io.kestra.plugin.cassandra.astradb.Query" required
Constant: "io.kestra.plugin.cassandra.astradb.Query"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
store boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.cassandra.astradb.Trigger object
Examples

Wait for a CQL query to return results, and then iterate through rows.

id: astra_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.core.tasks.flows.EachSequential
    tasks:
      - id: return
        type: io.kestra.core.tasks.debugs.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.cassandra.astradb.Trigger
    interval: "PT5M"
    session:
        secureBundle: /path/to/secureBundle.zip
        keyspace: astradb_keyspace
        clientId: astradb_clientId
        clientSecret: astradb_clientSecret
    cql: "SELECT * FROM CQL_KEYSPACE.CQL_TABLE"
    fetch: true

cql string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
session required
All of: io.kestra.plugin.cassandra.astradb.AstraDbSession object, The session connection properties.
type const: "io.kestra.plugin.cassandra.astradb.Trigger" required
Constant: "io.kestra.plugin.cassandra.astradb.Trigger"
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
interval string

The interval between two consecutive polls of the schedule, which can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval should be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.cassandra.standard.CassandraDbSession object
applicationName string

It will be sent in the STARTUP protocol message, under the key APPLICATION_NAME, for each new connection established by the driver. Currently, this information is used by Insights monitoring (if the target cluster does not support Insights, the entry will be ignored by the server).

endpoints array
localDatacenter string
password string
secureConnection
username string
io.kestra.plugin.cassandra.standard.CassandraDbSession-Endpoint object
hostname string required
minLength=1
port integer

Default value is : 9042

Default: 9042
serverName string

In the context of Cloud, this is the string representation of the host ID.

io.kestra.plugin.cassandra.standard.CassandraDbSession-SecureConnection object
keystorePassword string
keystorePath string
truststorePassword string
truststorePath string
io.kestra.plugin.cassandra.standard.Query object
Examples

Send a CQL query to a Cassandra database.

id: cassandra_standard_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.cassandra.standard.Query
    session:
      endpoints:
        - hostname: localhost
      secureConnection:
        truststorePath: path to .crt file
        truststorePassword: truststore_password
        keystorePath: path to .jks file
        keystorePassword: keystore_password
      username: cassandra_user
      password: cassandra_passwd
    cql: SELECT * FROM CQL_KEYSPACE.CQL_TABLE
    fetch: true

cql string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
session required
All of: io.kestra.plugin.cassandra.standard.CassandraDbSession object, The session connection properties.
type const: "io.kestra.plugin.cassandra.standard.Query" required
Constant: "io.kestra.plugin.cassandra.standard.Query"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
store boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.cassandra.standard.Trigger object
Examples

Wait for a CQL query to return results, and then iterate through rows.

id: cassandra_trigger
namespace: io.kestra.tests

tasks:
  - id: each
    type: io.kestra.core.tasks.flows.EachSequential
    tasks:
      - id: return
        type: io.kestra.core.tasks.debugs.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.cassandra.standard.Trigger
    interval: "PT5M"
    session:
       endpoints:
          - hostname: localhost
       username: cassandra_user
       password: cassandra_passwd
    cql: "SELECT * FROM CQL_KEYSPACE.CQL_TABLE"
    fetch: true

cql string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
session required
All of: io.kestra.plugin.cassandra.standard.CassandraDbSession object, The session connection properties.
type const: "io.kestra.plugin.cassandra.standard.Trigger" required
Constant: "io.kestra.plugin.cassandra.standard.Trigger"
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
interval string

The interval between two consecutive polls of the schedule, which can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval should be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.cloudquery.CloudQueryCLI object
Examples

Run a CloudQuery sync from CLI. You need an API key to download plugins. You can add the API key as an environment variable called CLOUDQUERY_API_KEY.

id: cloudquery_sync_cli
namespace: company.team

tasks:
  - id: hn_to_duckdb
    type: io.kestra.plugin.cloudquery.CloudQueryCLI
    env:
      CLOUDQUERY_API_KEY: "{{ secret('CLOUDQUERY_API_KEY') }}"
    inputFiles:
      config.yml: |
        kind: source
        spec:
          name: hackernews
          path: cloudquery/hackernews
          version: v3.0.13
          tables: ["*"]
          backend_options:
            table_name: cq_cursor
            connection: "@@plugins.duckdb.connection"
          destinations:
            - "duckdb"
          spec:
            item_concurrency: 100
            start_time: "{{ now() | dateAdd(-1, 'DAYS') }}"
          ---
          kind: destination
          spec:
            name: duckdb
            path: cloudquery/duckdb
            version: v4.2.10
            write_mode: overwrite-delete-stale
            spec:
              connection_string: hn.db
    commands:
      - cloudquery sync config.yml --log-console
commands string[] required
minItems=1
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.cloudquery.CloudQueryCLI" required
Constant: "io.kestra.plugin.cloudquery.CloudQueryCLI"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : ghcr.io/cloudquery/cloudquery:latest

Default: "ghcr.io/cloudquery/cloudquery:latest"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
env Record<string, string>
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.cloudquery.Sync object
Examples

Start a CloudQuery sync based on a YAML configuration. You need an API key to download plugins. You can add the API key as an environment variable called CLOUDQUERY_API_KEY.

id: cloudquery_sync
namespace: company.team

tasks:
  - id: hn_to_duckdb
    type: io.kestra.plugin.cloudquery.Sync
    env:
      CLOUDQUERY_API_KEY: "{{ secret('CLOUDQUERY_API_KEY') }}"
    incremental: false
    configs:
      - kind: source
        spec:
          name: hackernews
          path: cloudquery/hackernews
          version: v3.0.13
          tables: ["*"]
          destinations: ["duckdb"]
          spec:
            item_concurrency: 100
            start_time: "{{ now() | dateAdd(-1, 'DAYS') }}"
      - kind: destination
        spec:
          name: duckdb
          path: cloudquery/duckdb
          version: v4.2.10
          write_mode: overwrite-delete-stale
          spec:
            connection_string: hn.db

Start a CloudQuery sync based on a file(s) input.

id: cloudquery_sync
namespace: company.team

tasks:
  - id: hn_to_duckdb
    type: io.kestra.plugin.cloudquery.Sync
    incremental: false
    env:
        AWS_ACCESS_KEY_ID: "{{ secret('AWS_ACCESS_KEY_ID') }}"
        AWS_SECRET_ACCESS_KEY: "{{ secret('AWS_SECRET_ACCESS_KEY') }}"
        AWS_DEFAULT_REGION: "{{ secret('AWS_DEFAULT_REGION') }}"
        CLOUDQUERY_API_KEY: "{{ secret('CLOUDQUERY_API_KEY') }}"
        PG_CONNECTION_STRING: "postgresql://postgres:{{ secret('DB_PASSWORD') }}@host.docker.internal:5432/demo?sslmode=disable"
    configs:
      - sources.yml
      - destination.yml
configs array required

A list of CloudQuery configurations or files containing CloudQuery configurations.

One of: string[] string[], object[] object[]
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.cloudquery.Sync" required
Constant: "io.kestra.plugin.cloudquery.Sync"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : ghcr.io/cloudquery/cloudquery:latest

Default: "ghcr.io/cloudquery/cloudquery:latest"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
env Record<string, string>
incremental boolean

Kestra can automatically add a backend option to your sources and store the incremental indexes in the KV Store. Use this boolean to activate this option.

Default value is : false

Default: false
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.compress.ArchiveCompress object
Examples
id: archive_compress
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: "archive_compress"
    type: "io.kestra.plugin.compress.ArchiveCompress"
    from:
      myfile.txt: "{{ inputs.file }}"
    algorithm: ZIP

id: archive_compress
namespace: company.team

tasks:
  - id: products_download
    type: io.kestra.plugin.core.http.Download
    uri: "https://raw.githubusercontent.com/kestra-io/datasets/main/csv/products.csv"

  - id: orders_download
    type: io.kestra.plugin.core.http.Download
    uri: "https://raw.githubusercontent.com/kestra-io/datasets/main/csv/orders.csv"

  - id: archive_compress
    type: "io.kestra.plugin.compress.ArchiveCompress"
    from:
      products.csv: "{{ outputs.products_download.uri }}"
      orders.csv: "{{ outputs.orders_download.uri }}"
    algorithm: TAR
    compression: GZIP

algorithm string required
Values: "AR" "ARJ" "CPIO" "DUMP" "JAR" "TAR" "ZIP"
from Record<string, string> required

The key must be a valid path in the archive and can contain / to represent the directory, the value must be a Kestra internal storage URI. The value can also be a JSON containing multiple keys/values.

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.compress.ArchiveCompress" required
Constant: "io.kestra.plugin.compress.ArchiveCompress"
allowFailure boolean

Default value is : false

Default: false
compression string
Values: "BROTLI" "BZIP2" "DEFLATE" "DEFLATE64" "GZIP" "LZ4BLOCK" "LZ4FRAME" "LZMA" "SNAPPY" "SNAPPYFRAME" "XZ" "Z" "ZSTD"
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.compress.ArchiveDecompress object
Examples
id: archive_decompress
namespace: company.team

inputs:
  - id: file
    description: Compressed file
    type: FILE

tasks:
  - id: archive_decompress
    type: io.kestra.plugin.compress.ArchiveDecompress
    from: "{{ inputs.file }}"
    algorithm: ZIP
    compression: GZIP

algorithm string required
Values: "AR" "ARJ" "CPIO" "DUMP" "JAR" "TAR" "ZIP"
from string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.compress.ArchiveDecompress" required
Constant: "io.kestra.plugin.compress.ArchiveDecompress"
allowFailure boolean

Default value is : false

Default: false
compression string
Values: "BROTLI" "BZIP2" "DEFLATE" "DEFLATE64" "GZIP" "LZ4BLOCK" "LZ4FRAME" "LZMA" "SNAPPY" "SNAPPYFRAME" "XZ" "Z" "ZSTD"
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.compress.FileCompress object
Examples
id: file_compress
namespace: company.team

inputs:
  - id: file
    description: File to be compressed
    type: FILE

tasks:
  - id: compress
    type: io.kestra.plugin.compress.FileCompress
    from: "{{ inputs.file }}"
    compression: Z

compression string required
Values: "BROTLI" "BZIP2" "DEFLATE" "DEFLATE64" "GZIP" "LZ4BLOCK" "LZ4FRAME" "LZMA" "SNAPPY" "SNAPPYFRAME" "XZ" "Z" "ZSTD"
from string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.compress.FileCompress" required
Constant: "io.kestra.plugin.compress.FileCompress"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.compress.FileDecompress object
Examples
id: file_decompress
namespace: company.team

inputs:
  - id: file
    description: File to be decompressed
    type: FILE

tasks:
  - id: decompress
    type: io.kestra.plugin.compress.FileDecompress
    from: "{{ inputs.file }}"
    compression: Z

compression string required
Values: "BROTLI" "BZIP2" "DEFLATE" "DEFLATE64" "GZIP" "LZ4BLOCK" "LZ4FRAME" "LZMA" "SNAPPY" "SNAPPYFRAME" "XZ" "Z" "ZSTD"
from string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.compress.FileDecompress" required
Constant: "io.kestra.plugin.compress.FileDecompress"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.condition.DateTimeBetweenCondition object
Examples
# This will evaluate to true when the trigger date falls after the `after` date.
- conditions:
    - type: io.kestra.plugin.core.condition.DateTimeBetweenCondition
      date: "{{ trigger.date }}"
      after: "2024-01-01T08:30:00Z"

# This will evaluate to true when the trigger date falls between the `before` and `after` dates.
- conditions:
    - type: io.kestra.plugin.core.condition.DateTimeBetweenCondition
      date: "{{ trigger.date }}"
      before: "2024-01-01T08:30:00Z"
      after: "2024-12-31T23:30:00Z"
type const: "io.kestra.plugin.core.condition.DateTimeBetweenCondition" required
Constant: "io.kestra.plugin.core.condition.DateTimeBetweenCondition"
after string

Must be a valid ISO 8601 datetime with the zone identifier (use 'Z' for the default zone identifier).

format=date-time
before string

Must be a valid ISO 8601 datetime with the zone identifier (use 'Z' for the default zone identifier).

format=date-time
date string

Can be any variable or any valid ISO 8601 datetime. By default, it will use the trigger date.

Default value is : "{{ trigger.date }}"

Default: "{{ trigger.date }}"
io.kestra.plugin.core.condition.DayWeekCondition object
Examples
- conditions:
    - type: io.kestra.plugin.core.condition.DayWeekCondition
      dayOfWeek: "MONDAY"
dayOfWeek string required
Values: "MONDAY" "TUESDAY" "WEDNESDAY" "THURSDAY" "FRIDAY" "SATURDAY" "SUNDAY"
type const: "io.kestra.plugin.core.condition.DayWeekCondition" required
Constant: "io.kestra.plugin.core.condition.DayWeekCondition"
date string

Can be any variable or any valid ISO 8601 datetime. By default, it will use the trigger date.

Default value is : "{{ trigger.date }}"

Default: "{{ trigger.date }}"
io.kestra.plugin.core.condition.DayWeekInMonthCondition object
Examples
- conditions:
    - type: io.kestra.plugin.core.condition.DayWeekInMonthCondition
      dayOfWeek: MONDAY
      dayInMonth: FIRST
dayInMonth string required
Values: "FIRST" "LAST" "SECOND" "THIRD" "FOURTH"
dayOfWeek string required
Values: "MONDAY" "TUESDAY" "WEDNESDAY" "THURSDAY" "FRIDAY" "SATURDAY" "SUNDAY"
type const: "io.kestra.plugin.core.condition.DayWeekInMonthCondition" required
Constant: "io.kestra.plugin.core.condition.DayWeekInMonthCondition"
date string

Can be any variable or any valid ISO 8601 datetime. By default, it will use the trigger date.

Default value is : "{{ trigger.date }}"

Default: "{{ trigger.date }}"
io.kestra.plugin.core.condition.ExecutionFlowCondition object
Examples
- conditions:
    - type: io.kestra.plugin.core.condition.ExecutionFlowCondition
      namespace: company.team
      flowId: my-current-flow
flowId string required
namespace string required
type const: "io.kestra.plugin.core.condition.ExecutionFlowCondition" required
Constant: "io.kestra.plugin.core.condition.ExecutionFlowCondition"
io.kestra.plugin.core.condition.ExecutionLabelsCondition object
Examples
- conditions:
    - type: io.kestra.plugin.core.condition.ExecutionLabelsCondition
      labels:
         owner: john.doe
labels array | object required

List of labels to match in the execution.

type const: "io.kestra.plugin.core.condition.ExecutionLabelsCondition" required
Constant: "io.kestra.plugin.core.condition.ExecutionLabelsCondition"
io.kestra.plugin.core.condition.ExecutionNamespaceCondition object
Examples
- conditions:
    - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
      namespace: company.team
      comparison: PREFIX
namespace string required
type const: "io.kestra.plugin.core.condition.ExecutionNamespaceCondition" required
Constant: "io.kestra.plugin.core.condition.ExecutionNamespaceCondition"
comparison string
Values: "EQUALS" "PREFIX" "SUFFIX"
prefix boolean

Only used when comparison is not set

Default value is : false

Default: false
io.kestra.plugin.core.condition.ExecutionOutputsCondition object

The condition returns false if the execution has no output. If the result is an empty string, a space, or false, the condition will also be considered as false.

Examples

A condition that will return true for an output matching a specific value.

- conditions:
    - type: io.kestra.plugin.core.condition.ExecutionOutputsCondition
      expression: {{ trigger.outputs.status_code == '200' }}
expression string required
minLength=1
type const: "io.kestra.plugin.core.condition.ExecutionOutputsCondition" required
Constant: "io.kestra.plugin.core.condition.ExecutionOutputsCondition"
io.kestra.plugin.core.condition.ExecutionStatusCondition object
Examples
- conditions:
    - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
      in:
        - SUCCESS
      notIn: 
        - FAILED
type const: "io.kestra.plugin.core.condition.ExecutionStatusCondition" required
Constant: "io.kestra.plugin.core.condition.ExecutionStatusCondition"
in string[]
notIn string[]
io.kestra.plugin.core.condition.ExpressionCondition object

If the result is an empty string, a string containing only spaces, or false, the condition will be considered as false.

Examples

A condition that will return false for a missing variable.

- conditions:
    - type: io.kestra.plugin.core.condition.ExpressionCondition
      expression: {{ unknown is defined }}
expression string required
minLength=1
type const: "io.kestra.plugin.core.condition.ExpressionCondition" required
Constant: "io.kestra.plugin.core.condition.ExpressionCondition"
io.kestra.plugin.core.condition.FlowCondition object
Examples
- conditions:
    - type: io.kestra.plugin.core.condition.FlowCondition
      namespace: company.team
      flowId: my-current-flow
flowId string required
namespace string required
type const: "io.kestra.plugin.core.condition.FlowCondition" required
Constant: "io.kestra.plugin.core.condition.FlowCondition"
io.kestra.plugin.core.condition.FlowNamespaceCondition object

Use io.kestra.plugin.core.condition.ExecutionNamespaceCondition instead.

Examples

- conditions:
    - type: io.kestra.plugin.core.condition.FlowNamespaceCondition
      namespace: io.kestra.tests
      prefix: true
namespace string required
type const: "io.kestra.plugin.core.condition.FlowNamespaceCondition" required
Constant: "io.kestra.plugin.core.condition.FlowNamespaceCondition"
prefix boolean

Default value is : false

Default: false
io.kestra.plugin.core.condition.HasRetryAttemptCondition object
Examples
- conditions:
    - type: io.kestra.plugin.core.condition.HasRetryAttemptCondition
      in:
        - KILLED
type const: "io.kestra.plugin.core.condition.HasRetryAttemptCondition" required
Constant: "io.kestra.plugin.core.condition.HasRetryAttemptCondition"
in string[]
notIn string[]
io.kestra.plugin.core.condition.MultipleCondition object

Trigger when all the flows are successfully executed for the first time during the window duration.

Examples

A flow that is waiting for 2 flows to run successfully in a day

triggers:
  - id: multiple-listen-flow
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
        - SUCCESS
      - id: multiple
        type: io.kestra.plugin.core.condition.MultipleCondition
        window: P1D
        windowAdvance: P0D
        conditions:
          flow-a:
            type: io.kestra.plugin.core.condition.ExecutionFlowCondition
            namespace: io.kestra.demo
            flowId: multiplecondition-flow-a
          flow-b:
            type: io.kestra.plugin.core.condition.ExecutionFlowCondition
            namespace: io.kestra.demo
            flowId: multiplecondition-flow-b
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.condition.MultipleCondition" required
Constant: "io.kestra.plugin.core.condition.MultipleCondition"
window string required

See ISO 8601 durations for more information on available duration values. The start of the window is always based on midnight, except if you set the windowAdvance parameter. E.g. if you have a 10-minute (PT10M) window, the first window will be 00:00 to 00:10, and a new window will be started every 10 minutes.

format=duration
windowAdvance string required

Allows specifying the start hour of the window. E.g. you want a window of 6 hours (window=PT6H). By default, the check will be done between: 00:00 and 06:00, 06:00 and 12:00, 12:00 and 18:00, 18:00 and 00:00. If you want the window to be checked between: 03:00 and 09:00, 09:00 and 15:00, 15:00 and 21:00, 21:00 and 03:00, you will have to shift the window by 3 hours by setting windowAdvance: PT3H

format=duration
io.kestra.plugin.core.condition.NotCondition object
Examples
- conditions:
    - type: io.kestra.plugin.core.condition.NotCondition
      conditions:
      -  type: io.kestra.plugin.core.condition.DateTimeBetweenCondition
         after: "2013-09-08T16:19:12Z"
conditions array required

If any conditions is true, it will prevent the event's execution.

minItems=1
type const: "io.kestra.plugin.core.condition.NotCondition" required
Constant: "io.kestra.plugin.core.condition.NotCondition"
io.kestra.plugin.core.condition.OrCondition object
Examples
- conditions:
    - type: io.kestra.plugin.core.condition.OrCondition
      conditions:
      -  type: io.kestra.plugin.core.condition.DayWeekCondition
         dayOfWeek: "MONDAY"
      -  type: io.kestra.plugin.core.condition.DayWeekCondition
         dayOfWeek: "SUNDAY"
conditions array required

If any condition is true, it will allow the event's execution.

minItems=1
type const: "io.kestra.plugin.core.condition.OrCondition" required
Constant: "io.kestra.plugin.core.condition.OrCondition"
io.kestra.plugin.core.condition.PublicHolidayCondition object
Examples

Condition to allow events on public holidays.

- conditions:
    - type: io.kestra.plugin.core.condition.PublicHolidayCondition
      country: FR

Conditions to allow events on work days.

- conditions:
    - type: io.kestra.plugin.core.condition.NotCondition
      conditions:
        - type: io.kestra.plugin.core.condition.PublicHolidayCondition
          country: FR
        - type: io.kestra.plugin.core.condition.WeekendCondition

type const: "io.kestra.plugin.core.condition.PublicHolidayCondition" required
Constant: "io.kestra.plugin.core.condition.PublicHolidayCondition"
country string

It uses the Jollyday library for public holiday calendar that supports more than 70 countries.

date string

Can be any variable or any valid ISO 8601 datetime. By default, it will use the trigger date.

Default value is : "{{ trigger.date }}"

Default: "{{ trigger.date }}"
minLength=1
subDivision string

It uses the Jollyday library for public holiday calendar that supports more than 70 countries.

io.kestra.plugin.core.condition.TimeBetweenCondition object
Examples
- conditions:
    - type: io.kestra.plugin.core.condition.TimeBetweenCondition
      after: "16:19:12+02:00"
type const: "io.kestra.plugin.core.condition.TimeBetweenCondition" required
Constant: "io.kestra.plugin.core.condition.TimeBetweenCondition"
after string

Must be a valid ISO 8601 time with offset.

format=time
before string

Must be a valid ISO 8601 time with offset.

format=time
date string

Can be any variable or any valid ISO 8601 time. By default, it will use the trigger date.

Default value is : "{{ trigger.date }}"

Default: "{{ trigger.date }}"
io.kestra.plugin.core.condition.WeekendCondition object
Examples
- conditions:
    - type: io.kestra.plugin.core.condition.WeekendCondition
type const: "io.kestra.plugin.core.condition.WeekendCondition" required
Constant: "io.kestra.plugin.core.condition.WeekendCondition"
date string

Can be any variable or any valid ISO 8601 datetime. By default, it will use the trigger date.

Default value is : "{{ trigger.date }}"

Default: "{{ trigger.date }}"
io.kestra.plugin.core.debug.Echo object

This task is deprecated, please use the io.kestra.plugin.core.log.Log task instead.

Examples

id: echo_flow
namespace: company.team

tasks:
  - id: echo
    type: io.kestra.plugin.core.debug.Echo
    level: WARN
    format: "{{ task.id }} > {{ taskrun.startDate }}"

format string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.debug.Echo" required
Constant: "io.kestra.plugin.core.debug.Echo"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
level string

Default value is : INFO

Default: "INFO"
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.debug.Return object

This task is mostly useful for troubleshooting.

It allows you to return some templated functions, inputs or outputs.

Examples

id: return_flow
namespace: company.team

tasks:
  - id: return
    type: io.kestra.plugin.core.debug.Return
    format: "{{ task.id }} > {{ taskrun.startDate }}"

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.debug.Return" required
Constant: "io.kestra.plugin.core.debug.Return"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
format string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.execution.Count object

This can be used to send an alert if a condition is met about execution counts.

Examples

Send a slack notification if there is no execution for a flow for the last 24 hours.

id: executions_count
namespace: company.team

tasks:
  - id: counts
    type: io.kestra.plugin.core.execution.Count
    expression: "{{ count == 0 }}"
    flows:
      - namespace: company.team
        flowId: logs
    startDate: "{{ now() | dateAdd(-1, 'DAYS') }}"
  - id: each_parallel
    type: io.kestra.plugin.core.flow.EachParallel
    tasks:
      - id: slack_incoming_webhook
        type: io.kestra.plugin.notifications.slack.SlackIncomingWebhook
        payload: |
          {
            "channel": "#run-channel",
            "text": ":warning: Flow `{{ jq taskrun.value '.namespace' true }}`.`{{ jq taskrun.value '.flowId' true }}` has no execution for last 24h!"
          }
        url: "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX"
    value: "{{ jq outputs.counts.results '. | select(. != null) | .[]' }}"

triggers:
  - id: schedule
    type: io.kestra.plugin.core.trigger.Schedule
    backfill: {}
    cron: "0 4 * * * "
expression string required

The expression must return true in order to keep the current line. Some examples:

  • {{ eq count 0 }}: no execution found
  • {{ gte count 5 }}: more than 5 executions
flows array required
minItems=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
startDate string required
type const: "io.kestra.plugin.core.execution.Count" required
Constant: "io.kestra.plugin.core.execution.Count"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
endDate string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
states string[]
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.execution.Fail object

Used to fail the execution, for example, on a switch branch or on some conditions based on the execution context.

Examples

Fail on a switch branch

id: fail_on_switch
namespace: company.team

inputs:
  - id: param
    type: STRING
    required: true

tasks:
  - id: switch
    type: io.kestra.plugin.core.flow.Switch
    value: "{{inputs.param}}"
    cases:
      case1:
        - id: case1
          type: io.kestra.plugin.core.log.Log
          message: Case 1
      case2:
        - id: case2
          type: io.kestra.plugin.core.log.Log
          message: Case 2
      notexist:
        - id: fail
          type: io.kestra.plugin.core.execution.Fail
      default:
        - id: default
          type: io.kestra.plugin.core.log.Log
          message: default

Fail on a condition

id: fail_on_condition
namespace: company.team

inputs:
  - id: param
    type: STRING
    required: true

tasks:
  - id: before
    type: io.kestra.plugin.core.debug.Echo
    format: I'm before the fail on condition 
  - id: fail
    type: io.kestra.plugin.core.execution.Fail
    condition: '{{ inputs.param == "fail" }}'
  - id: after
    type: io.kestra.plugin.core.debug.Echo
    format: I'm after the fail on condition 
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.execution.Fail" required
Constant: "io.kestra.plugin.core.execution.Fail"
allowFailure boolean

Default value is : false

Default: false
condition string

Boolean coercion allows 0, -0, and '' to coerce to false, all other values to coerce to true.

description string
disabled boolean

Default value is : false

Default: false
errorMessage string

Default value is : Task failure

Default: "Task failure"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.execution.Labels object
Examples

Add labels based on a webhook payload

id: webhook_based_labels
namespace: company.team
tasks:
  - id: update_labels_with_map
    type: io.kestra.plugin.core.execution.Labels
    labels:
      customerId: "{{ trigger.body.customerId }}"
  - id: by_list
    type: io.kestra.plugin.core.execution.Labels
    labels:
      - key: order_id
        value: "{{ trigger.body.orderId }}"
      - key: order_type
        value: "{{ trigger.body.orderType }}"
triggers:
  - id: webhook
    key: order_webhook
    type: io.kestra.plugin.core.trigger.Webhook
    conditions:
      - type: io.kestra.plugin.core.condition.ExpressionCondition
        expression: "{{ trigger.body.customerId is defined and trigger.body.orderId is defined and trigger.body.orderType is defined }}"
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
labels Record<string, string> required

The value should result in a list of labels or a labelKey:labelValue map

type const: "io.kestra.plugin.core.execution.Labels" required
Constant: "io.kestra.plugin.core.execution.Labels"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.execution.PurgeExecutions object

This task can be used to purge flow execution data for all flows, for a specific namespace, or for a specific flow.

Examples

Purge all flow execution data for flows that ended more than one month ago.

endDate: "{{ now() | dateAdd(-1, 'MONTHS') }}"
states: 
 - KILLED
 - FAILED
 - WARNING
 - SUCCESS
endDate string required

All data of flows executed before this date will be purged.

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.execution.PurgeExecutions" required
Constant: "io.kestra.plugin.core.execution.PurgeExecutions"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
flowId string

You need to provide the namespace properties if you want to purge a flow.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespace string

If flowId isn't provided, this is a namespace prefix, else the namespace of the flow.

purgeExecution boolean

Default value is : true

Default: true
purgeLog boolean

This will only purge logs from executions not from triggers, and it will do it execution by execution. The io.kestra.plugin.core.log.PurgeLogs task is a better fit to purge logs as it will purge logs in bulk, and will also purge logs not tied to an execution like trigger logs.

Default value is : true

Default: true
purgeMetric boolean

Default value is : true

Default: true
purgeStorage boolean

Default value is : true

Default: true
startDate string

All data of flows executed after this date will be purged.

states string[]

If not set, executions for any states will be purged.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.execution.Resume object
Examples
executionId: "{{ trigger.executionId }}"
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.execution.Resume" required
Constant: "io.kestra.plugin.core.execution.Resume"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
executionId string

If you explicitly define an executionId, Kestra will use that specific ID.

If another namespace and flowId properties are set, Kestra will look for a paused execution for that corresponding flow.

If executionId is not set, the task will use the ID of the current execution.

flowId string
inputs object
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespace string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.AllowFailure object

If any child task of the AllowFailure task fails, the flow will stop executing this block of tasks (i.e. the next tasks in the AllowFailure block will no longer be executed), but the flow execution of the tasks, following the AllowFailure task, will continue.

Examples

id: allow_failure
namespace: company.team

tasks:
  - id: sequential
    type: io.kestra.plugin.core.flow.AllowFailure
    tasks:
     - id: ko
       type: io.kestra.plugin.scripts.shell.Commands
       commands:
        - 'exit 1'
  - id: last
    type: io.kestra.plugin.core.debug.Return
    format: "{{ task.id }} > {{ taskrun.startDate }}"

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.flow.AllowFailure" required
Constant: "io.kestra.plugin.core.flow.AllowFailure"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
errors array
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
tasks array
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.Dag object

List your tasks and their dependencies, and Kestra will figure out the execution sequence. Each task can only depend on other tasks from the DAG task. For technical reasons, low-code interaction via UI forms is disabled for now when using this task.

Examples

Run a series of tasks for which the execution order is defined by their upstream dependencies.

id: dag_flow
namespace: company.team
tasks:
  - id: dag
    type: io.kestra.plugin.core.flow.Dag
    tasks:
      - task:
          id: task1
          type: io.kestra.plugin.core.log.Log
          message: task 1
      - task:
          id: task2
          type: io.kestra.plugin.core.log.Log
          message: task 2
        dependsOn:
          - task1
      - task:
          id: task3
          type: io.kestra.plugin.core.log.Log
          message: task 3
        dependsOn:
          - task1
      - task:
          id: task4
          type: io.kestra.plugin.core.log.Log
          message: task 4
        dependsOn:
          - task2
      - task:
          id: task5
          type: io.kestra.plugin.core.log.Log
          message: task 5
        dependsOn:
          - task4
          - task3

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
minItems=1
type const: "io.kestra.plugin.core.flow.Dag" required
Constant: "io.kestra.plugin.core.flow.Dag"
allowFailure boolean

Default value is : false

Default: false
concurrent integer

If the value is 0, no concurrency limit exists for the tasks in a DAG and all tasks that can run in parallel will start at the same time.

Default value is : 0

Default: 0
description string
disabled boolean

Default value is : false

Default: false
errors array
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.Dag-DagTask object
task required
dependsOn string[]
io.kestra.plugin.core.flow.EachParallel object

This task is deprecated, please use the io.kestra.plugin.core.flow.ForEach task instead.

The list of tasks will be executed for each item in parallel. The value must be a valid JSON string representing an array, e.g. a list of strings ["value1", "value2"] or a list of dictionaries [{"key": "value1"}, {"key": "value2"}]. You can access the current iteration value using the variable {{ taskrun.value }}.

The task list will be executed in parallel for each item. For example, if you have a list with 3 elements and 2 tasks defined in the list of tasks, all 6 tasks will be computed in parallel without any order guarantee.

If you want to execute a group of sequential tasks for each value in parallel, you can wrap the list of tasks with the Sequential task. If your list of values is large, you can limit the number of concurrent tasks using the concurrent property.

We highly recommend triggering a subflow for each value (e.g. using the ForEachItem task) instead of specifying many tasks wrapped in a Sequential task. This allows better scalability and modularity. Check the flow best practices documentation for more details.##### Examples

id: each_parallel
namespace: company.team

tasks:
  - id: each_parallel
    type: io.kestra.plugin.core.flow.EachParallel
    value: '["value 1", "value 2", "value 3"]'
    tasks:
      - id: each_value
        type: io.kestra.plugin.core.debug.Return
        format: "{{ task.id }} with current value '{{ taskrun.value }}'"

Create a file for each value in parallel, then process all files in the next task. Note how the inputFiles property uses a jq expression with a map function to extract the paths of all files processed in parallel and pass them into the next task's working directory.

id: parallel_script
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachParallel
    value: "{{ range(1, 9) }}"
    tasks:
      - id: script
        type: io.kestra.plugin.scripts.shell.Script
        outputFiles:
          - "out/*.txt"
        script: |
          mkdir out
          echo "{{ taskrun.value }}" > out/file_{{ taskrun.value }}.txt

  - id: process_all_files
    type: io.kestra.plugin.scripts.shell.Script
    inputFiles: "{{ outputs.script | jq('map(.outputFiles) | add') | first }}"
    script: |
      ls -h out/

Run a group of tasks for each value in parallel.

id: parallel_task_groups
namespace: company.team

tasks:
  - id: for_each
    type: io.kestra.plugin.core.flow.EachParallel
    value: ["value 1", "value 2", "value 3"]
    tasks:
      - id: group
        type: io.kestra.plugin.core.flow.Sequential
        tasks:
          - id: task1
            type: io.kestra.plugin.scripts.shell.Commands
            commands:
              - echo "{{task.id}} > {{ parents[0].taskrun.value }}"
              - sleep 1

          - id: task2
            type: io.kestra.plugin.scripts.shell.Commands
            commands:
              - echo "{{task.id}} > {{ parents[0].taskrun.value }}"
              - sleep 1

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
tasks array required
minItems=1
type const: "io.kestra.plugin.core.flow.EachParallel" required
Constant: "io.kestra.plugin.core.flow.EachParallel"
value string | array required

The value can be passed as a string, a list of strings, or a list of objects.

allowFailure boolean

Default value is : false

Default: false
concurrent integer

If the value is 0, no limit exists and all the tasks will start at the same time.

Default value is : 0

Default: 0
description string
disabled boolean

Default value is : false

Default: false
errors array
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.EachSequential object

This task is deprecated, please use the io.kestra.plugin.core.flow.ForEach task instead.

The list of tasks will be executed for each item sequentially. The value must be a valid JSON string representing an array, e.g. a list of strings ["value1", "value2"] or a list of dictionaries [{"key": "value1"}, {"key": "value2"}].

You can access the current iteration value using the variable {{ taskrun.value }}. The task list will be executed sequentially for each item.

We highly recommend triggering a subflow for each value. This allows much better scalability and modularity. Check the flow best practices documentation and the following Blueprint for more details.##### Examples

The taskrun.value from the each_sequential task is available only to immediate child tasks such as the before_if and the if tasks. To access the taskrun value in child tasks of the if task (such as in the after_if task), you need to use the syntax {{ parent.taskrun.value }} as this allows you to access the taskrun value of the parent task each_sequential.

id: loop_example
namespace: company.team

tasks:
  - id: each_sequential
    type: io.kestra.plugin.core.flow.EachSequential
    value: ["value 1", "value 2", "value 3"]
    tasks:
      - id: before_if
        type: io.kestra.plugin.core.debug.Return
        format: 'Before if {{ taskrun.value }}'
      - id: if
        type: io.kestra.plugin.core.flow.If
        condition: '{{ taskrun.value == "value 2" }}'
        then:
          - id: after_if
            type: io.kestra.plugin.core.debug.Return
            format: "After if {{ parent.taskrun.value }}"

This task shows that the value can be a bullet-style list. The task iterates over the list of values and executes the each_value child task for each value.

id: each_sequential_flow
namespace: company.team

tasks:
  - id: each_sequential
    type: io.kestra.plugin.core.flow.EachSequential
    value:
      - value 1
      - value 2
      - value 3
    tasks:
      - id: each_value
        type: io.kestra.plugin.core.debug.Return
        format: "{{ task.id }} with value '{{ taskrun.value }}'"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.flow.EachSequential" required
Constant: "io.kestra.plugin.core.flow.EachSequential"
value string | array required

The value can be passed as a string, a list of strings, or a list of objects.

allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
errors array
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
tasks array
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.ForEach object

You can control how many task groups are executed concurrently by setting the concurrencyLimit property.

  • If you set the concurrencyLimit property to 0, Kestra will execute all task groups concurrently for all values.
  • If you set the concurrencyLimit property to 1, Kestra will execute each task group one after the other starting with the task group for the first value in the list.

Regardless of the concurrencyLimit property, the tasks will run one after the other — to run those in parallel, wrap them in a Parallel task as shown in the last example below (see the flow parallel_tasks_example).

The values should be defined as a JSON string or an array, e.g. a list of string values ["value1", "value2"] or a list of key-value pairs [{"key": "value1"}, {"key": "value2"}].

You can access the current iteration value using the variable {{ taskrun.value }} or {{ parent.taskrun.value }} if you are in a nested child task.

If you need to execute more than 2-5 tasks for each value, we recommend triggering a subflow for each value for better performance and modularity. Check the flow best practices documentation for more details.##### Examples

The {{ taskrun.value }} from the for_each task is available only to direct child tasks such as the before_if and the if tasks. To access the taskrun value of the parent task in a nested child task such as the after_if task, use {{ parent.taskrun.value }}.

id: for_loop_example
namespace: company.team

tasks:
  - id: for_each
    type: io.kestra.plugin.core.flow.ForEach
    values: ["value 1", "value 2", "value 3"]
    tasks:
      - id: before_if
        type: io.kestra.plugin.core.debug.Return
        format: "Before if {{ taskrun.value }}"
      - id: if
        type: io.kestra.plugin.core.flow.If
        condition: '{{ taskrun.value == "value 2" }}'
        then:
          - id: after_if
            type: io.kestra.plugin.core.debug.Return
            format: "After if {{ parent.taskrun.value }}"

This flow uses YAML-style array for values. The task for_each iterates over a list of values and executes the return child task for each value. The concurrencyLimit property is set to 2, so the return task will run concurrently for the first two values in the list at first. The return task will run for the next two values only after the task runs for the first two values have completed.

id: for_each_value
namespace: company.team

tasks:
  - id: for_each
    type: io.kestra.plugin.core.flow.ForEach
    values:
      - value 1
      - value 2
      - value 3
      - value 4
    concurrencyLimit: 2
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ task.id }} with value {{ taskrun.value }}"

This example shows how to run tasks in parallel for each value in the list. All child tasks of the parallel task will run in parallel. However, due to the concurrencyLimit property set to 2, only two parallel task groups will run at any given time.

id: parallel_tasks_example
namespace: company.team

tasks:
  - id: for_each
    type: io.kestra.plugin.core.flow.ForEach
    values: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    concurrencyLimit: 2
    tasks:
      - id: parallel
        type: io.kestra.plugin.core.flow.Parallel
        tasks:
        - id: log
          type: io.kestra.plugin.core.log.Log
          message: Processing {{ parent.taskrun.value }}
        - id: shell
          type: io.kestra.plugin.scripts.shell.Commands
          commands:
            - sleep {{ parent.taskrun.value }}

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.flow.ForEach" required
Constant: "io.kestra.plugin.core.flow.ForEach"
values string | array required

The values can be passed as a string, a list of strings, or a list of objects.

allowFailure boolean

Default value is : false

Default: false
concurrencyLimit integer

If you set the concurrencyLimit property to 0, Kestra will execute all task groups concurrently for all values (zero limits!).

If you set the concurrencyLimit property to 1, Kestra will execute each task group one after the other starting with the first value in the list (limit concurrency to one task group that can be actively running at any time).

Default value is : 1

Default: 1
min=0
description string
disabled boolean

Default value is : false

Default: false
errors array
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
tasks array
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.ForEachItem object

The items value must be Kestra's internal storage URI e.g. an output file from a previous task, or a file from inputs of FILE type. Two special variables are available to pass as inputs to the subflow:

  • taskrun.items which is the URI of internal storage file containing the batch of items to process
  • taskrun.iteration which is the iteration or batch number##### Examples

Execute a subflow for each batch of items. The subflow orders is called from the parent flow orders_parallel using the ForEachItem task in order to start one subflow execution for each batch of items.

id: orders
namespace: company.team

inputs:
  - id: order
    type: STRING

tasks:
  - id: read_file
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - cat "{{ inputs.order }}"

  - id: read_file_content
    type: io.kestra.plugin.core.log.Log
    message: "{{ read(inputs.order) }}"
id: orders_parallel
namespace: company.team

tasks:
  - id: extract
    type: io.kestra.plugin.jdbc.duckdb.Query
    sql: |
      INSTALL httpfs;
      LOAD httpfs;
      SELECT *
      FROM read_csv_auto('https://huggingface.co/datasets/kestra/datasets/raw/main/csv/orders.csv', header=True);
    store: true

  - id: each
    type: io.kestra.plugin.core.flow.ForEachItem
    items: "{{ outputs.extract.uri }}"
    batch:
      rows: 1
    namespace: company.team
    flowId: orders
    wait: true # wait for the subflow execution
    transmitFailed: true # fail the task run if the subflow execution fails
    inputs:
      order: "{{ taskrun.items }}" # special variable that contains the items of the batch

Execute a subflow for each JSON item fetched from a REST API. The subflow mysubflow is called from the parent flow iterate_over_json using the ForEachItem task; this creates one subflow execution for each JSON object.

Note how we first need to convert the JSON array to JSON-L format using the JsonWriter task. This is because the items attribute of the ForEachItem task expects a file where each line represents a single item. Suitable file types include Amazon ION (commonly produced by Query tasks), newline-separated JSON files, or CSV files formatted with one row per line and without a header. For other formats, you can use the conversion tasks available in the io.kestra.plugin.serdes module.

In this example, the subflow mysubflow expects a JSON object as input. The JsonReader task first reads the JSON array from the REST API and converts it to ION. Then, the JsonWriter task converts that ION file to JSON-L format, suitable for the ForEachItem task.

id: mysubflow
namespace: company.team

inputs:
  - id: json
    type: JSON

tasks:
  - id: debug
    type: io.kestra.plugin.core.log.Log
    message: "{{ inputs.json }}"
id: iterate_over_json
namespace: company.team

tasks:
  - id: download
    type: io.kestra.plugin.fs.http.Download
    uri: "https://api.restful-api.dev/objects"
    contentType: application/json
    method: GET
    failOnEmptyResponse: true
    timeout: PT15S

  - id: json_to_ion
    type: io.kestra.plugin.serdes.json.JsonReader
    from: "{{ outputs.download.uri }}"
    newLine: false # regular json

  - id: ion_to_jsonl
    type: io.kestra.plugin.serdes.json.JsonWriter
    from: "{{ outputs.json_to_ion.uri }}"
    newLine: true # JSON-L

  - id: for_each_item
    type: io.kestra.plugin.core.flow.ForEachItem
    items: "{{ outputs.ion_to_jsonl.uri }}"
    batch:
      rows: 1
    namespace: company.team
    flowId: mysubflow
    wait: true
    transmitFailed: true
    inputs:
      json: "{{ json(read(taskrun.items)) }}"

This example shows how to use the combination of EachSequential and ForEachItem tasks to process files from an S3 bucket. The EachSequential iterates over files from the S3 trigger, and the ForEachItem task is used to split each file into batches. The process_batch subflow is then called with the data input parameter set to the URI of the batch to process.

id: process_batch
namespace: company.team

inputs:
  - id: data
    type: FILE

tasks:
  - id: debug
    type: io.kestra.plugin.core.log.Log
    message: "{{ read(inputs.data) }}"
id: process_files
namespace: company.team

tasks:
  - id: loop_over_files
    type: io.kestra.plugin.core.flow.EachSequential
    value: "{{ trigger.objects | jq('.[].uri') }}"
    tasks:
      - id: subflow_per_batch
        type: io.kestra.plugin.core.flow.ForEachItem
        items: "{{ trigger.uris[parent.taskrun.value] }}"
        batch:
          rows: 1
        flowId: process_batch
        namespace: company.team
        wait: true
        transmitFailed: true
        inputs:
          data: "{{ taskrun.items }}"

triggers:
  - id: s3
    type: io.kestra.plugin.aws.s3.Trigger
    interval: "PT1S"
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "us-east-1"
    bucket: "my_bucket"
    prefix: "sub-dir"
    action: NONE

flowId string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
items string required
minLength=1
namespace string required
minLength=1
type const: "io.kestra.plugin.core.flow.ForEachItem" required
Constant: "io.kestra.plugin.core.flow.ForEachItem"
allowFailure boolean

Default value is : false

Default: false
batch
All of: io.kestra.plugin.core.flow.ForEachItem-Batch object, How to split the items into batches.
description string
disabled boolean

Default value is : false

Default: false
errors array
inheritLabels boolean

By default, labels are not passed to the subflow execution. If you set this option to true, the child flow execution will inherit all labels from the parent execution.

Default value is : false

Default: false
inputs object
labels object
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
revision integer

By default, the last, i.e. the most recent, revision of the subflow is executed.

scheduleDate string | string
timeout string
format=duration
transmitFailed boolean

Note that this option works only if wait is set to true.

Default value is : true

Default: true
wait boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.ForEachItem-Batch object
bytes string

Can be provided as a string in the format "10MB" or "200KB", or the number of bytes. This allows you to process large files, split them into smaller chunks by lines and process them in parallel. For example, MySQL by default limits the size of a query to 16MB per query. Trying to use a bulk insert query with input data larger than 16MB will fail. Splitting the input data into smaller chunks is a common strategy to circumvent this limitation. By dividing a large data set into chunks smaller than the max_allowed_packet size (e.g., 10MB), you can insert the data in multiple smaller queries. This approach not only helps to avoid hitting the query size limit but can also be more efficient and manageable in terms of memory utilization, especially for very large datasets. In short, by splitting the file by bytes, you can bulk-insert smaller chunks of e.g. 10MB in parallel to avoid this limitation.

partitions integer
rows integer

Default value is : 1

Default: 1
separator string

Default value is : \n

Default: "\n"
io.kestra.plugin.core.flow.If object

Allow some workflow based on context variables, for example, branch a flow based on a previous task.##### Examples

id: if
namespace: company.team

inputs:
  - id: string
    type: STRING
    required: true

tasks:
  - id: if
    type: io.kestra.plugin.core.flow.If
    condition: "{{ inputs.string == 'Condition' }}"
    then:
      - id: when_true
        type: io.kestra.plugin.core.log.Log
        message: "Condition was true"
    else:
      - id: when_false
        type: io.kestra.plugin.core.log.Log
        message: "Condition was false"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
then array required
minItems=1
type const: "io.kestra.plugin.core.flow.If" required
Constant: "io.kestra.plugin.core.flow.If"
allowFailure boolean

Default value is : false

Default: false
condition string

Boolean coercion allows 0, -0, null and '' to evaluate to false, all other values will evaluate to true.

description string
disabled boolean

Default value is : false

Default: false
else array
errors array
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.Parallel object

This task runs all child tasks in parallel.##### Examples

id: parallel
namespace: company.team

tasks:
  - id: parallel
    type: io.kestra.plugin.core.flow.Parallel
    tasks:
      - id: 1st
        type: io.kestra.plugin.core.debug.Return
        format: "{{ task.id }} > {{ taskrun.startDate }}"

      - id: 2nd
        type: io.kestra.plugin.core.debug.Return
        format: "{{ task.id }} > {{ taskrun.id }}"

  - id: last
    type: io.kestra.plugin.core.debug.Return
    format: "{{ task.id }} > {{ taskrun.startDate }}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
tasks array required
minItems=1
type const: "io.kestra.plugin.core.flow.Parallel" required
Constant: "io.kestra.plugin.core.flow.Parallel"
allowFailure boolean

Default value is : false

Default: false
concurrent integer

If the value is 0, no limit exists and all tasks will start at the same time.

Default value is : 0

Default: 0
description string
disabled boolean

Default value is : false

Default: false
errors array
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.Pause object
Examples

Pause the execution and wait for a manual approval

id: human_in_the_loop
namespace: company.team

tasks:
  - id: before_approval
    type: io.kestra.plugin.core.debug.Return
    format: Output data that needs to be validated by a human

  - id: pause
    type: io.kestra.plugin.core.flow.Pause

  - id: run_post_approval
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - echo "Manual approval received! Continuing the execution..."

  - id: post_resume
    type: io.kestra.plugin.core.debug.Return
    format: "{{ task.id }} started on {{ taskrun.startDate }} after the Pause"

Vacation approval process pausing the execution for approval and waiting for input from a human to approve or reject the request.

id: vacation_approval_process
namespace: company.team

inputs:
  - id: request.name
    type: STRING
    defaults: Rick Astley

  - id: request.start_date
    type: DATE
    defaults: 2042-07-01

  - id: request.end_date
    type: DATE
    defaults: 2042-07-07

  - id: slack_webhook_uri
    type: URI
    defaults: https://reqres.in/api/slack

tasks:
  - id: send_approval_request
    type: io.kestra.plugin.notifications.slack.SlackIncomingWebhook
    url: "{{ inputs.slack_webhook_uri }}"
    payload: |
      {
        "channel": "#vacation",
        "text": "Validate holiday request for {{ inputs.request.name }}. To approve the request, click on the `Resume` button here http://localhost:28080/ui/executions/{{flow.namespace}}/{{flow.id}}/{{execution.id}}"
      }

  - id: wait_for_approval
    type: io.kestra.plugin.core.flow.Pause
    onResume:
      - id: approved
        description: Whether to approve the request
        type: BOOLEAN
        defaults: true
      - id: reason
        description: Reason for approval or rejection
        type: STRING
        defaults: Well-deserved vacation

  - id: approve
    type: io.kestra.plugin.core.http.Request
    uri: https://reqres.in/api/products
    method: POST
    contentType: application/json
    body: "{{ inputs.request }}"

  - id: log
    type: io.kestra.plugin.core.log.Log
    message: Status is {{ outputs.wait_for_approval.onResume.reason }}. Process finished with {{ outputs.approve.body }}

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.flow.Pause" required
Constant: "io.kestra.plugin.core.flow.Pause"
allowFailure boolean

Default value is : false

Default: false
delay string

The delay is a string in the ISO 8601 Duration format, e.g. PT1H for 1 hour, PT30M for 30 minutes, PT10S for 10 seconds, P1D for 1 day, etc. If no delay and no timeout are configured, the execution will never end until it's manually resumed from the UI or API.

format=duration
description string
disabled boolean

Default value is : false

Default: false
errors array
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
onResume array

Before resuming the execution, the user will be prompted to fill in these inputs. The inputs can be used to pass additional data to the execution which is useful for human-in-the-loop scenarios. The onResume inputs work the same way as regular flow inputs — they can be of any type and can have default values. You can access those values in downstream tasks using the onResume output of the Pause task.

tasks array
timeout string

If no delay and no timeout are configured, the execution will never end until it's manually resumed from the UI or API.

format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.Sequential object

Used to visually group tasks.##### Examples

id: sequential
namespace: company.team

tasks:
  - id: sequential
    type: io.kestra.plugin.core.flow.Sequential
    tasks:
      - id: first_task
        type: io.kestra.plugin.core.debug.Return
        format: "{{ task.id }} > {{ taskrun.startDate }}"

      - id: second_task
        type: io.kestra.plugin.core.debug.Return
        format: "{{ task.id }} > {{ taskrun.id }}"

  - id: last
    type: io.kestra.plugin.core.debug.Return
    format: "{{ task.id }} > {{ taskrun.startDate }}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.flow.Sequential" required
Constant: "io.kestra.plugin.core.flow.Sequential"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
errors array
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
tasks array
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.Subflow object
Examples

Run a subflow with custom inputs.

id: running_subflow
namespace: company.team

tasks:
  - id: call_subflow
    type: io.kestra.plugin.core.flow.Subflow
    namespace: company.team
    flowId: subflow
    inputs:
      user: "Rick Astley"
      favorite_song: "Never Gonna Give You Up"
    wait: true
    transmitFailed: true

flowId string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
namespace string required
minLength=1
type const: "io.kestra.plugin.core.flow.Subflow" required
Constant: "io.kestra.plugin.core.flow.Subflow"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
inheritLabels boolean

By default, labels are not passed to the subflow execution. If you set this option to true, the child flow execution will inherit all labels from the parent execution.

Default value is : false

Default: false
inputs object
labels object
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
outputs object

Allows you to specify outputs as key-value pairs to extract any outputs from the subflow execution into the output of this task execution. This property is deprecated since v0.15.0; please use the outputs property on the Subflow definition for defining the output values available and exposed to this task execution.

revision integer

By default, the last, i.e. the most recent, revision of the subflow is executed.

min=1
scheduleDate string | string
timeout string
format=duration
transmitFailed boolean

Note that this option works only if wait is set to true.

Default value is : true

Default: true
wait boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.Switch object

This task runs a set of tasks based on a given value. The value is evaluated at runtime and compared to the list of cases. If the value matches a case, the corresponding tasks are executed. If the value does not match any case, the default tasks are executed.##### Examples

id: switch
namespace: company.team

inputs:
  - id: string
    type: STRING
    required: true

tasks:
  - id: switch
    type: io.kestra.plugin.core.flow.Switch
    value: "{{ inputs.string }}"
    cases:
      FIRST:
        - id: first
          type: io.kestra.plugin.core.debug.Return
          format: "{{ task.id }} > {{ taskrun.startDate }}"
      SECOND:
        - id: second
          type: io.kestra.plugin.core.debug.Return
          format: "{{ task.id }} > {{ taskrun.startDate }}"
      THIRD:
        - id: third
          type: io.kestra.plugin.core.debug.Return
          format: "{{ task.id }} > {{ taskrun.startDate }}"
    defaults:
      - id: default
        type: io.kestra.plugin.core.debug.Return
        format: "{{ task.id }} > {{ taskrun.startDate }}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.flow.Switch" required
Constant: "io.kestra.plugin.core.flow.Switch"
value string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
cases object
defaults array
description string
disabled boolean

Default value is : false

Default: false
errors array
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.Template object
Examples
id: template
namespace: company.team

inputs:
  - id: with_string
    type: STRING

tasks:
  - id: 1_return
    type: io.kestra.plugin.core.debug.Return
    format: "{{ task.id }} > {{ taskrun.startDate }}"

  - id: 2_template
    type: io.kestra.plugin.core.flow.Template
    namespace: company.team
    templateId: template
    args:
      my_forward: "{{ inputs.with_string }}"

  - id: 3_end
    type: io.kestra.plugin.core.debug.Return
    format: "{{ task.id }} > {{ taskrun.startDate }}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
namespace string required
templateId string required
type const: "io.kestra.plugin.core.flow.Template" required
Constant: "io.kestra.plugin.core.flow.Template"
allowFailure boolean

Default value is : false

Default: false
args Record<string, string>

You can provide a list of named arguments (like function arguments in programming), allowing you to rename outputs of the current flow for this template. For example, if you declare this use of the template like this:

  - id: 2-template
    type: io.kestra.plugin.core.flow.Template
    namespace: io.kestra.tests
    templateId: template
    args:
      forward: "{{ output.task-id.uri }}"

You will be able to get this output on the template with {{ parent.outputs.args.forward }}.

description string
disabled boolean

Default value is : false

Default: false
errors array
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
tenantId string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.WaitFor object

Use this task if your workflow requires blocking calls polling for a job to finish or for some external API to return a specific HTTP response.

You can access the outputs of the nested tasks in the condition property. The condition is evaluated after all nested task runs finish.

Examples

Run a task until it returns a specific value. Note how you don't need to take care of incrementing the iteration count. The task will loop and keep track of the iteration outputs behind the scenes — you only need to specify the exit condition for the loop.

id: wait_for
namespace: company.team

tasks:
  - id: loop
    type: io.kestra.plugin.core.flow.WaitFor
    condition: "{{ outputs.return.value == '4' }}"
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ outputs.loop.iterationCount }}"

condition string required

Boolean coercion allows 0, -0, null and '' to evaluate to false; all other values will evaluate to true.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
tasks array required
type const: "io.kestra.plugin.core.flow.WaitFor" required
Constant: "io.kestra.plugin.core.flow.WaitFor"
allowFailure boolean

Default value is : false

Default: false
checkFrequency
All of: io.kestra.plugin.core.flow.WaitFor-CheckFrequency object, Check the frequency configuration.
description string
disabled boolean

Default value is : false

Default: false
errors array
failOnMaxReached boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.WaitFor-CheckFrequency object
interval string

Default value is : 1.000000000

Default: 1.0
format=duration
maxDuration string

Default value is : 3600.000000000

Default: 3600.0
format=duration
maxIterations integer

Default value is : 100

Default: 100
io.kestra.plugin.core.flow.WorkingDirectory object

Tasks are stateless by default. Kestra will launch each task within a temporary working directory on a Worker. The WorkingDirectory task allows reusing the same file system's working directory across multiple tasks so that multiple sequential tasks can use output files from previous tasks without having to use the outputs.taskId.outputName syntax. Note that the WorkingDirectory only works with runnable tasks because those tasks are executed directly on the Worker. This means that using flowable tasks such as the Parallel task within the WorkingDirectory task will not work.

Examples

Clone a Git repository into the Working Directory and run a Python script in a Docker container.

id: git_python
namespace: company.team

tasks:
  - id: wdir
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/examples
        branch: main

      - id: python
        type: io.kestra.plugin.scripts.python.Commands
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
        containerImage: ghcr.io/kestra-io/pydata:latest
        commands:
          - python scripts/etl_script.py

Add input and output files within a Working Directory to use them in a Python script.

    id: api_json_to_mongodb
    namespace: company.team

    tasks:
      - id: wdir
        type: io.kestra.plugin.core.flow.WorkingDirectory
        outputFiles:
          - output.json
        inputFiles:
          query.sql: |
            SELECT sum(total) as total, avg(quantity) as avg_quantity
            FROM sales;
        tasks:
          - id: inline_script
            type: io.kestra.plugin.scripts.python.Script
            taskRunner:
              type: io.kestra.plugin.scripts.runner.docker.Docker
            containerImage: python:3.11-slim
            beforeCommands:
              - pip install requests kestra > /dev/null
            warningOnStdErr: false
            script: |
              import requests
              import json
              from kestra import Kestra

              with open('query.sql', 'r') as input_file:
                  sql = input_file.read()

              response = requests.get('https://api.github.com')
              data = response.json()

              with open('output.json', 'w') as output_file:
                  json.dump(data, output_file)

              Kestra.outputs({'receivedSQL': sql, 'status': response.status_code})

      - id: load_to_mongodb
        type: io.kestra.plugin.mongodb.Load
        connection:
          uri: mongodb://host.docker.internal:27017/
        database: local
        collection: github
        from: "{{ outputs.wdir.uris['output.json'] }}"

id: working_directory
namespace: company.team

tasks:
  - id: working_directory
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: first
        type: io.kestra.plugin.scripts.shell.Commands
        commands:
        - 'echo "{{ taskrun.id }}" > {{ workingDir }}/stay.txt'
      - id: second
        type: io.kestra.plugin.scripts.shell.Commands
        commands:
        - |
          echo '::{"outputs": {"stay":"'$(cat {{ workingDir }}/stay.txt)'"}}::'

A working directory with a cache of the node_modules directory.

id: node_with_cache
namespace: company.team

tasks:
  - id: working_dir
    type: io.kestra.plugin.core.flow.WorkingDirectory
    cache:
      patterns:
        - node_modules/**
      ttl: PT1H
    tasks:
      - id: script
        type: io.kestra.plugin.scripts.node.Script
        beforeCommands:
          - npm install colors
        script: |
          const colors = require("colors");
          console.log(colors.red("Hello"));

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.flow.WorkingDirectory" required
Constant: "io.kestra.plugin.core.flow.WorkingDirectory"
allowFailure boolean

Default value is : false

Default: false
cache
All of: io.kestra.plugin.core.flow.WorkingDirectory-Cache object, Cache configuration.
description string
disabled boolean

Default value is : false

Default: false
errors array
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

tasks array
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.flow.WorkingDirectory-Cache object
patterns string[] required

For example, 'node_modules/**' will include all files of the node_modules directory including sub-directories.

ttl string
format=duration
io.kestra.plugin.core.http.Download object

This task connects to an HTTP server and copies a file to Kestra's internal storage.

Examples

Download a CSV file.

id: download
namespace: company.team

tasks:
  - id: extract
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/orders.csv
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.http.Download" required
Constant: "io.kestra.plugin.core.http.Download"
uri string required
allowFailure boolean

Default value is : false

Default: false
body string
contentType string
description string
disabled boolean

Default value is : false

Default: false
failOnEmptyResponse boolean

Default value is : true

Default: true
formData object
headers object
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
method string

Default value is : GET

Default: "GET"
Values: "OPTIONS" "GET" "HEAD" "POST" "PUT" "DELETE" "TRACE" "CONNECT" "PATCH" "CUSTOM"
options
All of: io.kestra.plugin.core.http.HttpInterface-RequestOptions object, The HTTP request options
sslOptions
All of: io.kestra.plugin.core.http.HttpInterface-SslOptions object, The SSL request options
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.http.HttpInterface-RequestOptions object
basicAuthPassword string
basicAuthUser string
connectTimeout string
format=duration
connectionPoolIdleTimeout string

Default value is : 0.0

Default: 0.0
format=duration
defaultCharset
All of: java.nio.charset.Charset object, The default charset for the request.
followRedirects boolean

Default value is : true

Default: true
logLevel string
Values: "ALL" "TRACE" "DEBUG" "INFO" "WARN" "ERROR" "OFF" "NOT_SPECIFIED"
maxContentLength integer

Default value is : 10485760

Default: 10485760
proxyAddress string
proxyPassword string
proxyPort integer
proxyType string

Default value is : DIRECT

Default: "DIRECT"
Values: "DIRECT" "HTTP" "SOCKS"
proxyUsername string
readIdleTimeout string

Default value is : 300.000000000

Default: 300.0
format=duration
readTimeout string

Default value is : 10.000000000

Default: 10.0
format=duration
io.kestra.plugin.core.http.HttpInterface-SslOptions object
insecureTrustAllCertificates boolean

Only applies if no trust store is configured. Note: This makes the SSL connection insecure and should only be used for testing. If you are using a self-signed certificate, set up a trust store instead.

io.kestra.plugin.core.http.Request object

This task makes an API call to a specified URL of an HTTP server and stores the response as output. By default, the maximum length of the response is limited to 10MB, but it can be increased to at most 2GB by using the options.maxContentLength property. Note that the response is added as output to the task. If you need to process large API payloads, we recommend using the Download task instead.

Examples

Execute a Kestra flow via an HTTP POST request authenticated with basic auth. To pass a user input to the API call, we use the formData property. When using form data, make sure to set the contentType property to multipart/form-data.

id: api_call
namespace: company.team

tasks:
  - id: basic_auth_api
    type: io.kestra.plugin.core.http.Request
    uri: http://host.docker.internal:8080/api/v1/executions/dev/inputs_demo
    options:
      basicAuthUser: admin
      basicAuthPassword: admin
    method: POST
    contentType: multipart/form-data
    formData:
      user: John Doe

Execute a Kestra flow via an HTTP request authenticated with a Bearer auth token.

id: api_auth_call
namespace: company.team

tasks:
  - id: auth_token_api
    type: io.kestra.plugin.core.http.Request
    uri: https://dummyjson.com/user/me
    method: GET
    headers:
      authorization: 'Bearer <TOKEN>'

Make an HTTP GET request with a timeout. The timeout property specifies the maximum time allowed for the entire task to run, while the options.connectTimeout, options.readTimeout, options.connectionPoolIdleTimeout, and options.readIdleTimeout properties specify the time allowed for establishing a connection, reading data from the server, keeping an idle connection in the client's connection pool, and keeping a read connection idle before closing it, respectively.

id: timeout
namespace: company.team

tasks:
  - id: http
    type: io.kestra.plugin.core.http.Request
    uri: https://reqres.in/api/long-request
    timeout: PT10M # no default
    method: GET
    options:
      connectTimeout: PT1M # no default
      readTimeout: PT30S # 10 seconds by default
      connectionPoolIdleTimeout: PT10S # 0 seconds by default
      readIdleTimeout: PT10M # 300 seconds by default

Make a HTTP request and process its output. Given that we send a JSON payload in the request body, we need to use application/json as content type.

id: http_post_request_example
namespace: company.team

inputs:
  - id: payload
    type: JSON
    defaults: |
      {"title": "Kestra Pen"}

tasks:
  - id: send_data
    type: io.kestra.plugin.core.http.Request
    uri: https://dummyjson.com/products/add
    method: POST
    contentType: application/json
    body: "{{ inputs.payload }}"

  - id: print_status
    type: io.kestra.plugin.core.log.Log
    message: '{{ outputs.send_data.body }}'

Send an HTTP POST request to a webserver.

id: http_post_request_example
namespace: company.team

tasks:
  - id: send_data
    type: io.kestra.plugin.core.http.Request
    uri: "https://server.com/login"
    headers:
      user-agent: "kestra-io"
    method: "POST"
    formData:
      user: "user"
      password: "pass"

Send a multipart HTTP POST request to a webserver.

id: http_post_multipart_example
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: send_data
    type: io.kestra.plugin.core.http.Request
    uri: "https://server.com/upload"
    headers:
      user-agent: "kestra-io"
    method: "POST"
    contentType: "multipart/form-data"
    formData:
      user: "{{ inputs.file }}"

Send a multipart HTTP POST request to a webserver and set a custom file name.

id: http_post_multipart_example
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: send_data
    type: io.kestra.plugin.core.http.Request
    uri: "https://server.com/upload"
    headers:
      user-agent: "kestra-io"
    method: "POST"
    contentType: "multipart/form-data"
    formData:
      user:
        name: "my-file.txt"
        content: "{{ inputs.file }}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.http.Request" required
Constant: "io.kestra.plugin.core.http.Request"
uri string required
allowFailed boolean

Default value is : false

Default: false
allowFailure boolean

Default value is : false

Default: false
body string
contentType string
description string
disabled boolean

Default value is : false

Default: false
encryptBody boolean

If this property is set to true, this task will output the request body using the encryptedBody output property; otherwise, the request body will be stored in the body output property.

Default value is : false

Default: false
formData object
headers object
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
method string

Default value is : GET

Default: "GET"
Values: "OPTIONS" "GET" "HEAD" "POST" "PUT" "DELETE" "TRACE" "CONNECT" "PATCH" "CUSTOM"
options
All of: io.kestra.plugin.core.http.HttpInterface-RequestOptions object, The HTTP request options
sslOptions
All of: io.kestra.plugin.core.http.HttpInterface-SslOptions object, The SSL request options
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.http.Trigger object
Examples

Send a Slack alert if the price is below a certain threshold. The flow will be triggered every 30 seconds until the condition is met. Then, the stopAfter property will disable the trigger to avoid unnecessary API calls and alerts.

id: http_price_alert
namespace: company.team

tasks:
  - id: send_slack_alert
    type: io.kestra.plugin.notifications.slack.SlackIncomingWebhook
    url: "{{ secret('SLACK_WEBHOOK') }}"
    payload: |
      {
        "channel": "#price-alerts",
        "text": "The price is now: {{ json(trigger.body).price }}"
      }

triggers:
  - id: http
    type: io.kestra.plugin.core.http.Trigger
    uri: https://fakestoreapi.com/products/1
    responseCondition: "{{ json(response.body).price <= 110 }}"
    interval: PT30S
    stopAfter:
      - SUCCESS

Trigger a flow if an HTTP endpoint returns a status code equals to 200

id: http_trigger
namespace: company.team

tasks:
  - id: log_response
    type: io.kestra.plugin.core.log.Log
    message: '{{ trigger.body }}'

triggers:
  - id: http
    type: io.kestra.plugin.core.http.Trigger
    uri: https://api.chucknorris.io/jokes/random
    responseCondition: "{{ response.statusCode == 200 }}"
    stopAfter:
      - SUCCESS

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.http.Trigger" required
Constant: "io.kestra.plugin.core.http.Trigger"
uri string required
body string
conditions array
contentType string
description string
disabled boolean

Default value is : false

Default: false
encryptBody boolean

When true, the encryptedBody output will be filled, otherwise the body output will be filled

Default value is : false

Default: false
formData object
headers object
interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
method string

Default value is : GET

Default: "GET"
Values: "OPTIONS" "GET" "HEAD" "POST" "PUT" "DELETE" "TRACE" "CONNECT" "PATCH" "CUSTOM"
options
All of: io.kestra.plugin.core.http.HttpInterface-RequestOptions object, The HTTP request options
responseCondition string

The condition will be evaluated after calling the HTTP endpoint, it can use the response itself to determine whether to start a flow or not. The following variables are available when evaluating the condition:

  • response.statusCode: the response HTTP status code
  • response.body: the response body as a string
  • response.headers: the response headers

Boolean coercion allows 0, -0, null and '' to evaluate to false; all other values will evaluate to true.

The condition will be evaluated before any 'generic trigger conditions' that can be configured via the conditions property.

Default value is : "{{ response.statusCode < 400 }}"

Default: "{{ response.statusCode < 400 }}"
sslOptions
All of: io.kestra.plugin.core.http.HttpInterface-SslOptions object, The SSL request options
stopAfter string[]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.kv.Delete object
Examples

Delete a KV pair.

id: kv_store_delete
namespace: company.team

tasks:
  - id: kv_delete
    type: io.kestra.plugin.core.kv.Delete
    key: my_variable
    namespace: dev # the current namespace of the flow will be used by default

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.kv.Delete" required
Constant: "io.kestra.plugin.core.kv.Delete"
allowFailure boolean
description string
disabled boolean
errorOnMissing boolean
key string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean
namespace string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.kv.Get object
Examples

Get value for my_variable key in dev namespace and fail if it's not present.

id: kv_store_get
namespace: company.team

tasks:
  - id: kv_get
    type: io.kestra.plugin.core.kv.Get
    key: my_variable
    namespace: dev # the current namespace of the flow will be used by default
    errorOnMissing: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
key string required
type const: "io.kestra.plugin.core.kv.Get" required
Constant: "io.kestra.plugin.core.kv.Get"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
errorOnMissing boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespace string

Default value is : "{{ flow.namespace }}"

Default: "{{ flow.namespace }}"
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.kv.GetKeys object
Examples

Get keys that are prefixed by my_var.

id: kv_store_getkeys
namespace: company.team

tasks:
  - id: kv_getkeys
    type: io.kestra.plugin.core.kv.GetKeys
    prefix: my_var
    namespace: dev # the current namespace of the flow will be used by default

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.kv.GetKeys" required
Constant: "io.kestra.plugin.core.kv.GetKeys"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespace string

Default value is : "{{ flow.namespace }}"

Default: "{{ flow.namespace }}"
prefix string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.kv.Set object
Examples

Set the task's uri output as a value for orders_file key.

id: kv_store_set
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/orders.csv

  - id: kv_set
    type: io.kestra.plugin.core.kv.Set
    key: orders_file
    value: "{{ outputs.http_download.uri }}"
    kvType: STRING

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
key string required
type const: "io.kestra.plugin.core.kv.Set" required
Constant: "io.kestra.plugin.core.kv.Set"
value string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
kvType string
Values: "STRING" "NUMBER" "BOOLEAN" "DATETIME" "DATE" "DURATION" "JSON"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespace string

Default value is : "{{ flow.namespace }}"

Default: "{{ flow.namespace }}"
overwrite boolean

Default value is : true

Default: true
timeout string
format=duration
ttl string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.log.Fetch object

This task is useful to automate moving logs between various systems and environments.

Examples

level: INFO
executionId: "{{ trigger.executionId }}"
level: WARN
executionId: "{{ execution.id }}"
tasksId: 
  - "previous_task_id"
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.log.Fetch" required
Constant: "io.kestra.plugin.core.log.Fetch"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
executionId string

If not set, the task will use the ID of the current execution. If set, it will try to locate the execution on the current flow unless the namespace and flowId properties are set.

flowId string
level string

Default value is : INFO

Default: "INFO"
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespace string
tasksId string[]
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.log.Log object
Examples
level: DEBUG
message: "{{ task.id }} > {{ taskrun.startDate }}"

Log one or more messages to the console.

id: hello_world
namespace: company.team

tasks:
  - id: greeting
    type: io.kestra.plugin.core.log.Log
    message:
      - Kestra team wishes you a great day 👋
      - If you need some help, reach out via Slack
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
message string | string[] required

It can be a string or an array of strings.

type const: "io.kestra.plugin.core.log.Log" required
Constant: "io.kestra.plugin.core.log.Log"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
level string

Default value is : INFO

Default: "INFO"
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.log.PurgeLogs object

This task can be used to purge flow execution and trigger logs for all flows, for a specific namespace, or for a specific flow.

Examples

Purge all logs that have been created more than one month ago.

endDate: "{{ now() | dateAdd(-1, 'MONTHS') }}"

Purge all logs that have been created more than one month ago, but keep error logs.

endDate: "{{ now() | dateAdd(-1, 'MONTHS') }}"
logLevels:
  - TRACE
  - DEBUG
  - INFO
  - WARN
endDate string required

All logs before this date will be purged.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.log.PurgeLogs" required
Constant: "io.kestra.plugin.core.log.PurgeLogs"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
flowId string

You need to provide the namespace property if you want to purge the logs of a flow.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logLevels string[]

If not set, log for any levels will be purged.

logToFile boolean

Default value is : false

Default: false
namespace string

If flowId isn't provided, this is a namespace prefix, else the namespace of the flow.

startDate string

All logs after this date will be purged.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.namespace.DeleteFiles object
Examples

Delete namespace files that match a specific regex glob pattern.

id: delete_files
namespace: company.team
tasks:
  - id: delete
    type: io.kestra.plugin.core.namespace.DeleteFiles
    namespace: tutorial
    files:
      - "**.upl"

Delete all namespace files from a specific namespace.

id: delete_all_files
namespace: company.team
tasks:
  - id: delete
    type: io.kestra.plugin.core.namespace.DeleteFiles
    namespace: tutorial
    files:
      - "**"

files array | string required

String or a list of strings; each string can either be a regex glob pattern or a file path URI.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
namespace string required
type const: "io.kestra.plugin.core.namespace.DeleteFiles" required
Constant: "io.kestra.plugin.core.namespace.DeleteFiles"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.namespace.DownloadFiles object

Use a regex glob pattern or a file path to download files from your namespace files. This can be useful to share code between projects and teams, even when it is located in a different namespace.

Examples

Download a namespace file.

id: download_file
namespace: company.team
tasks:
  - id: download
    type: io.kestra.plugin.core.namespace.DownloadFiles
    namespace: tutorial
    files:
      - "**input.txt"

Download all namespace files from a specific namespace.

id: download_all_files
namespace: company.team
tasks:
  - id: download
    type: io.kestra.plugin.core.namespace.DownloadFiles
    namespace: tutorial
    files:
      - "**"

files array | string required

String or a list of strings; each string can either be a regex glob pattern or a file path URI.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
namespace string required
type const: "io.kestra.plugin.core.namespace.DownloadFiles" required
Constant: "io.kestra.plugin.core.namespace.DownloadFiles"
allowFailure boolean

Default value is : false

Default: false
description string
destination string

Default value is : ""

Default: ""
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.namespace.UploadFiles object

Use a regex glob pattern or a file path to upload files as Namespace Files. When using a map with the desired file name as key and file path as value, you can also rename or relocate files.

Examples

Upload files generated by a previous task using the filesMap property.

id: upload_files_from_git
namespace: company.team

tasks:
  - id: download
    type: io.kestra.plugin.core.http.Download
    uri: https://github.com/kestra-io/scripts/archive/refs/heads/main.zip

  - id: unzip
    type: io.kestra.plugin.compress.ArchiveDecompress
    from: "{{ outputs.download.uri }}"
    algorithm: ZIP

  - id: upload
    type: io.kestra.plugin.core.namespace.UploadFiles
    filesMap: "{{ outputs.unzip.files }}"
    namespace: "{{ flow.namespace }}"

Upload a folder using a glob pattern. Note that the Regex syntax requires a glob pattern inspired by Apache Ant patterns. Make sure that your pattern starts with glob:, followed by the pattern. For example, use glob:**/dbt/** to upload the entire dbt folder (with all files and subdirectories) regardless of that folder's location in the directory structure.

id: upload_dbt_project
namespace: company.team

tasks:
  - id: wdir
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: git_clone
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/dbt-example
        branch: master

      - id: upload
        type: io.kestra.plugin.core.namespace.UploadFiles
        files:
          - "glob:**/dbt/**"
        namespace: "{{ flow.namespace }}"

Upload a specific file and rename it.

id: upload_a_file
namespace: company.team

tasks:
  - id: download
    type: io.kestra.plugin.core.http.Download
    uri: https://github.com/kestra-io/scripts/archive/refs/heads/main.zip

  - id: unzip
    type: io.kestra.plugin.compress.ArchiveDecompress
    from: "{{ outputs.download.uri }}"
    algorithm: ZIP

  - id: upload
    type: io.kestra.plugin.core.namespace.UploadFiles
    filesMap:
      LICENCE: "{{ outputs.unzip.files['scripts-main/LICENSE'] }}"
    namespace: "{{ flow.namespace }}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
namespace string required
type const: "io.kestra.plugin.core.namespace.UploadFiles" required
Constant: "io.kestra.plugin.core.namespace.UploadFiles"
allowFailure boolean

Default value is : false

Default: false
conflict string

Can be one of the following options: OVERWRITE, ERROR or SKIP. Default is OVERWRITE.

Default value is : OVERWRITE

Default: "OVERWRITE"
Values: "OVERWRITE" "ERROR" "SKIP"
description string
destination string

Required when providing a list of files.

Default value is : /

Default: "/"
disabled boolean

Default value is : false

Default: false
files string[]

This should be a list of glob patterns following the Apache Ant pattern syntax. It's primarily intended to be used with the WorkingDirectory task.

filesMap object | string

This should be a map of URIs, with the key being the filename that will be uploaded and the value being the URI. It is intended to be used with the output files of other tasks. Many Kestra tasks, incl. all Downloads tasks, output a map of files so that you can directly pass the output property to this task e.g. outputFiles in the S3 Downloads task or the files in the Archive Decompress task.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.output.OutputValues object

You can use this task to return some outputs and pass them to downstream tasks. It's helpful for parsing and returning values from a task. You can then access these outputs in your downstream tasks using the expression {{ outputs.mytask_id.values.my_output_name }} and you can see them in the Outputs tab.

Examples
id: outputs_flow
namespace: company.team

tasks:
  - id: output_values
    type: io.kestra.plugin.core.output.OutputValues
    values:
      taskrun_data: "{{ task.id }} > {{ taskrun.startDate }}"
      execution_data: "{{ flow.id }} > {{ execution.startDate }}"

  - id: log_values
    type: io.kestra.plugin.core.log.Log
    message: |
      Got the following outputs from the previous task:
      {{ outputs.output_values.values.taskrun_data }}
      {{ outputs.output_values.values.execution_data }}
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.output.OutputValues" required
Constant: "io.kestra.plugin.core.output.OutputValues"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
values object
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.runner.Process object

To access the task's working directory, use the {{workingDir}} Pebble expression or the WORKING_DIR environment variable. Input files and namespace files will be available in this directory.

To generate output files you can either use the outputFiles task's property and create a file with the same name in the task's working directory, or create any file in the output directory which can be accessed by the {{outputDir}} Pebble expression or the OUTPUT_DIR environment variables.

Note that:

  • This task runner is independent of any Operating System. You can use it equally on Linux, Mac or Windows without any additional configuration.
  • When the Kestra Worker running this task is shut down, the process will be interrupted and re-created as soon as the worker is restarted.

Examples

Execute a Shell command.

id: new_shell
namespace: company.team

tasks:
  - id: shell
    type: io.kestra.plugin.scripts.shell.Commands
    taskRunner:
      type: io.kestra.plugin.core.runner.Process
    commands:
      - echo "Hello World"

Install custom Python packages before executing a Python script. Note how we use the --break-system-packages flag to avoid conflicts with the system packages. Make sure to use this flag if you see errors similar to error: externally-managed-environment.

id: before_commands_example
namespace: company.team

inputs:
  - id: url
    type: URI
    defaults: https://jsonplaceholder.typicode.com/todos/1

tasks:
  - id: transform
    type: io.kestra.plugin.scripts.python.Script
    taskRunner:
      type: io.kestra.plugin.core.runner.Process
    beforeCommands:
      - pip install kestra requests --break-system-packages
    script: |
      import requests
      from kestra import Kestra

      url = "{{ inputs.url }}"

      response = requests.get(url)
      print('Status Code:', response.status_code)
      Kestra.outputs(response.json())

Pass input files to the task, execute a Shell command, then retrieve output files.

id: new_shell_with_file
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: shell
    type: io.kestra.plugin.scripts.shell.Commands
    inputFiles:
      data.txt: "{{inputs.file}}"
    outputFiles:
      - out.txt
    taskRunner:
      type: io.kestra.plugin.core.runner.Process
    commands:
      - cp {{workingDir}}/data.txt {{workingDir}}/out.txt
type const: "io.kestra.plugin.core.runner.Process" required
Constant: "io.kestra.plugin.core.runner.Process"
io.kestra.plugin.core.state.Delete object
Examples

Delete the default state for the current flow.

id: delete_state
type: io.kestra.plugin.core.state.Delete

Delete the myState state for the current flow.

id: delete_state
type: io.kestra.plugin.core.state.Delete
name: myState
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.state.Delete" required
Constant: "io.kestra.plugin.core.state.Delete"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
errorOnMissing boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
name string

Default value is : default

Default: "default"
namespace boolean

By default, the state is isolated by namespace and flow; setting this to true will allow the state to be shared between flows within the same namespace.

Default value is : false

Default: false
taskrunValue boolean

By default, the state will be isolated with taskrun.value (during iteration with each). Setting to false will allow using the same state for every run of the iteration.

Default value is : true

Default: true
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.state.Get object
Examples

Get the default state file for the current flow.

id: get_state
type: io.kestra.plugin.core.state.Get

Get the myState state for the current flow.

id: get_state
type: io.kestra.plugin.core.state.Get
name: myState
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.state.Get" required
Constant: "io.kestra.plugin.core.state.Get"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
errorOnMissing boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
name string

Default value is : default

Default: "default"
namespace boolean

By default, the state is isolated by namespace and flow; setting this to true will allow the state to be shared between flows within the same namespace.

Default value is : false

Default: false
taskrunValue boolean

By default, the state will be isolated with taskrun.value (during iteration with each). Setting to false will allow using the same state for every run of the iteration.

Default value is : true

Default: true
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.state.Set object

Values will be merged:

  • If you provide a new key, the new key will be added.
  • If you provide an existing key, the previous value will be overwritten.

::alert{type="warning"} This method is not concurrency safe. If many executions for the same flow are concurrent, there is no guarantee on isolation on the value. The value can be overwritten by other executions. ::

Examples

Set the default state for the current flow.

id: set_state
type: io.kestra.plugin.core.state.Set
data:
  '{{ inputs.store }}': '{{ outputs.download.md5 }}'

Set the myState state for the current flow.

id: set_state
type: io.kestra.plugin.core.state.Set
name: myState
data:
  '{{ inputs.store }}': '{{ outputs.download.md5 }}'
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.state.Set" required
Constant: "io.kestra.plugin.core.state.Set"
allowFailure boolean

Default value is : false

Default: false
data object
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
name string

Default value is : default

Default: "default"
namespace boolean

By default, the state is isolated by namespace and flow; setting this to true will allow the state to be shared between flows within the same namespace.

Default value is : false

Default: false
taskrunValue boolean

By default, the state will be isolated with taskrun.value (during iteration with each). Setting to false will allow using the same state for every run of the iteration.

Default value is : true

Default: true
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.storage.Concat object
Examples

Concat 2 files with a custom separator.

files: 
  - "kestra://long/url/file1.txt"
  - "kestra://long/url/file2.txt"
separator: "\n"

Concat files generated by an each task.

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: start_api_call
        type: io.kestra.plugin.scripts.shell.Commands
        commands:
          - echo {{ taskrun.value }} > {{ temp.generated }}
        files:
          - generated
    value: '["value1", "value2", "value3"]'
  - id: concat
    type: io.kestra.plugin.core.storage.Concat
    files:
      - "{{ outputs.start_api_call.value1.files.generated }}"
      - "{{ outputs.start_api_call.value2.files.generated }}"
      - "{{ outputs.start_api_call.value3.files.generated }}"

Concat a dynamic number of files.

tasks:
  - id: echo
    type: io.kestra.plugin.scripts.shell.Commands
    commands:
      - echo "Hello John" > {{ outputDirs.output }}/1.txt
      - echo "Hello Jane" > {{ outputDirs.output }}/2.txt
      - echo "Hello Doe" > {{ outputDirs.output }}/3.txt
    outputDirs:
      - output
  - id: concat
    type: io.kestra.plugin.core.storage.Concat
    files: "{{ outputs.echo.files | jq('.[]') }}"
files required

Must be kestra:// storage URIs; can be a list of strings or a JSON string.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.storage.Concat" required
Constant: "io.kestra.plugin.core.storage.Concat"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
extension string

Default value is : .tmp

Default: ".tmp"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
separator string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.storage.DeduplicateItems object

The Deduplicate task involves reading the input file twice, rather than loading the entire file into memory. The first iteration is used to build a deduplication map in memory containing the last lines observed for each key. The second iteration is used to rewrite the file without the duplicates. The task must be used with this in mind.

Examples
tasks:
   - id: deduplicate
     type: io.kestra.plugin.core.storage.DeduplicateItems
     from: "{{ inputs.uri }}"
     expr: "{{ key }}"

expr string required

The 'pebble' expression can be used for constructing a composite key.

from string required

Must be a kestra:// internal storage URI.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.storage.DeduplicateItems" required
Constant: "io.kestra.plugin.core.storage.DeduplicateItems"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.storage.Delete object
Examples
uri: "kestra://long/url/file.txt"
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.storage.Delete" required
Constant: "io.kestra.plugin.core.storage.Delete"
uri string required

Must be a kestra:// storage URI.

allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
errorOnMissing boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.storage.FilterItems object
Examples
tasks:
   - id: filter
     type: io.kestra.plugin.core.storage.FilterItems
     from: "{{ inputs.file }}"
     filterCondition: " {{ value == null }}"
     filterType: EXCLUDE
     errorOrNullBehavior: EXCLUDE

filterCondition string required

The 'pebble' expression should return a BOOLEAN value (i.e. true or false). Values 0, -0, and "" are interpreted as false. Otherwise, any non-empty value will be interpreted as true.

from string required

Must be a kestra:// internal storage URI.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.storage.FilterItems" required
Constant: "io.kestra.plugin.core.storage.FilterItems"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
errorOrNullBehavior string

Use FAIL to throw the exception and fail the task, INCLUDE to pass the item through, or EXCLUDE to drop the item.

Default value is : FAIL

Default: "FAIL"
Values: "FAIL" "INCLUDE" "EXCLUDE"
filterType string

Use INCLUDE to pass the item through, or EXCLUDE to drop the items.

Default value is : INCLUDE

Default: "INCLUDE"
Values: "INCLUDE" "EXCLUDE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.storage.LocalFiles object

This task was intended to be used along with the WorkingDirectory task to create temporary files. This task suffers from multiple limitations e.g. it cannot be skipped, so setting disabled: true will have no effect. Overall, the WorkingDirectory task is more flexible and should be used instead of this task. This task will be removed in a future version of Kestra.

Examples

Output local files created in a Python task and load them to S3.

    id: outputs_from_python_task
    namespace: company.team

    tasks:
      - id: wdir
        type: io.kestra.plugin.core.flow.WorkingDirectory
        tasks:
          - id: clone_repository
            type: io.kestra.plugin.git.Clone
            url: https://github.com/kestra-io/examples
            branch: main

          - id: git_python_scripts
            type: io.kestra.plugin.scripts.python.Commands
            warningOnStdErr: false
            runner: DOCKER
            docker:
              image: ghcr.io/kestra-io/pydata:latest
            beforeCommands:
              - pip install faker > /dev/null
            commands:
              - python examples/scripts/etl_script.py
              - python examples/scripts/generate_orders.py

          - id: export_files
            type: io.kestra.plugin.core.storage.LocalFiles
            outputs:
              - orders.csv
              - "*.parquet"

      - id: load_csv_to_s3
        type: io.kestra.plugin.aws.s3.Upload
        accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID') }}"
        secretKeyId: "{{ secret('AWS_SECRET_ACCESS_KEY') }}"
        region: eu-central-1
        bucket: kestraio
        key: stage/orders.csv
        from: "{{ outputs.export_files.outputFiles['orders.csv'] }}"
        disabled: true

Create a local file that will be accessible to a bash task.

id: "local_files"
namespace: "io.kestra.tests"

tasks:
  - id: working_dir
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
    - id: input_files
      type: io.kestra.plugin.core.storage.LocalFiles
      inputs:
        hello.txt: "Hello World\n"
        address.json: "{{ outputs.my_task_id.uri }}"
    - id: bash
      type: io.kestra.plugin.scripts.shell.Commands
      commands:
        - cat hello.txt

Send local files to Kestra's internal storage.

id: "local_files"
namespace: "io.kestra.tests"

tasks:
  - id: working_dir
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
    - id: bash
      type: io.kestra.plugin.scripts.shell.Commands
      commands:
        - mkdir -p sub/dir
        - echo "Hello from Bash" >> sub/dir/bash1.txt
        - echo "Hello from Bash" >> sub/dir/bash2.txt
    - id: output_files
      type: io.kestra.plugin.core.storage.LocalFiles
      outputs:
        - sub/**

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.storage.LocalFiles" required
Constant: "io.kestra.plugin.core.storage.LocalFiles"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
inputs object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
outputs string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.storage.PurgeCurrentExecutionFiles object

This will delete all the generated files from a flow for the current execution. This will delete all files from:

  • inputs
  • outputs
  • triggers

If the current execution doesn't have any generated files, the task will not fail.

Examples


id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.storage.PurgeCurrentExecutionFiles" required
Constant: "io.kestra.plugin.core.storage.PurgeCurrentExecutionFiles"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.storage.Reverse object
Examples
from: "kestra://long/url/file1.txt"
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.storage.Reverse" required
Constant: "io.kestra.plugin.core.storage.Reverse"
allowFailure boolean

Default value is : false

Default: false
charset string

Default value is : UTF-8

Default: "UTF-8"
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
separator string

Default value is : |2+

Default: " "
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.storage.Size object
Examples
uri: "kestra://long/url/file.txt"
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.storage.Size" required
Constant: "io.kestra.plugin.core.storage.Size"
uri string required

Must be a kestra:// storage URI.

allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.storage.Split object
Examples

Split a file by size.

from: "kestra://long/url/file1.txt"
bytes: 10MB

Split a file by rows count.

from: "kestra://long/url/file1.txt"
rows: 1000

Split a file in a defined number of partitions.

from: "kestra://long/url/file1.txt"
partitions: 8
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.storage.Split" required
Constant: "io.kestra.plugin.core.storage.Split"
allowFailure boolean

Default value is : false

Default: false
bytes string

Can be provided as a string in the format "10MB" or "200KB", or the number of bytes. This allows you to process large files, split them into smaller chunks by lines and process them in parallel. For example, MySQL by default limits the size of a query size to 16MB per query. Trying to use a bulk insert query with input data larger than 16MB will fail. Splitting the input data into smaller chunks is a common strategy to circumvent this limitation. By dividing a large data set into chunks smaller than the max_allowed_packet size (e.g., 10MB), you can insert the data in multiple smaller queries. This approach not only helps to avoid hitting the query size limit but can also be more efficient and manageable in terms of memory utilization, especially for very large datasets. In short, by splitting the file by bytes, you can bulk-insert smaller chunks of e.g. 10MB in parallel to avoid this limitation.

description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
partitions integer
rows integer
separator string

Default value is : \n

Default: "\n"
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.templating.TemplatedTask object
Examples
spec: |
  type: io.kestra.plugin.fs.http.Download
  {{ task.property }}: {{ task.value }}
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
spec string required
type const: "io.kestra.plugin.core.templating.TemplatedTask" required
Constant: "io.kestra.plugin.core.templating.TemplatedTask"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.trigger.Flow object

You can trigger a flow as soon as another flow ends. This allows you to add implicit dependencies between multiple flows, which can often be managed by different teams. ::alert{type="warning"} If you don't provide any conditions, the flow will be triggered for EVERY execution of EVERY flow on your instance. ::

Examples

This flow will be triggered after each successful execution of flow company.team.trigger_flow and forward the uri of my_task taskId outputs.

id: trigger_flow_listener
namespace: company.team

inputs:
  - id: from_parent
    type: STRING

tasks:
  - id: only_no_input
    type: io.kestra.plugin.core.debug.Return
    format: "v1: {{ trigger.executionId }}"

triggers:
  - id: listen_flow
    type: io.kestra.plugin.core.trigger.Flow
    inputs:
      from-parent: '{{ outputs.my_task.uri }}'
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionFlowCondition
        namespace: company.team
        flowId: trigger_flow
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - SUCCESS

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.trigger.Flow" required
Constant: "io.kestra.plugin.core.trigger.Flow"
conditions array
description string
disabled boolean

Default value is : false

Default: false
inputs object

Fill the inputs of this flow based on the outputs of the current flow, allowing you to pass data or files to the triggered flow. ::alert{type="warning"} If you provide invalid input, the execution will not be created! Since no task is started, you can't see any reason for the failure on the Execution UI, so you will need to go to the Logs tab on the UI to understand the error. ::

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
states string[]

By default, only executions in a terminal state will be evaluated. If you use a condition of type ExecutionStatusCondition it will be evaluated after this list. ::alert{type="info"} The trigger will be evaluated on each execution state change, this means that, for non-terminal state, they can be observed multiple times. For example, if a flow has two Pause tasks, the execution will transition two times from PAUSED to RUNNING so these states will be observed two times. :: ::alert{type="warning"} You cannot evaluate on the CREATED state. ::

Default value is : `- SUCCESS

  • WARNING
  • FAILED
  • KILLED
  • CANCELLED
  • RETRIED`

Default value is : `- SUCCESS

  • WARNING
  • FAILED
  • KILLED
  • CANCELLED
  • RETRIED`
Default:
[
  "SUCCESS",
  "WARNING",
  "FAILED",
  "KILLED",
  "CANCELLED",
  "RETRIED"
]
stopAfter string[]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.trigger.Schedule object

You can add multiple schedule(s) to a flow. The scheduler keeps track of the last scheduled date, allowing you to easily backfill missed executions. Keep in mind that if you change the trigger ID, the scheduler will consider this as a new schedule, and will start creating new scheduled executions from the current date. By default, Schedules will use UTC. If you need a different timezone, use the timezone property to update it.

Examples

Schedule a flow every 15 minutes.

id: scheduled_flow
namespace: company.team

tasks:
  - id: sleep_randomly
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - echo "{{ execution.startDate ?? trigger.date }}"
      - sleep $((RANDOM % 60 + 1))

triggers:
  - id: every_15_minutes
    type: io.kestra.plugin.core.trigger.Schedule
    cron: '*/15 * * * *'

Schedule a flow every hour using the cron nickname @hourly.

id: scheduled_flow
namespace: company.team

tasks:
  - id: log_hello_world
    type: io.kestra.plugin.core.log.Log
    message: Hello World! 🚀

triggers:
  - id: hourly
    type: io.kestra.plugin.core.trigger.Schedule
    cron: "@hourly"

Schedule a flow on the first Monday of the month at 11 AM.

id: scheduled_flow
namespace: company.team

tasks:
  - id: log_hello_world
    type: io.kestra.plugin.core.log.Log
    message: Hello World! 🚀

triggers:
  - id: schedule
    cron: "0 11 * * 1"
    conditions:
      - type: io.kestra.plugin.core.condition.DayWeekInMonthCondition
        date: "{{ trigger.date }}"
        dayOfWeek: "MONDAY"
        dayInMonth: "FIRST"

Schedule a flow every day at 9:00 AM and pause a schedule trigger after a failed execution using the stopAfter property.

id: business_critical_flow
namespace: company.team

tasks:
  - id: important_task
    type: io.kestra.plugin.core.log.Log
    message: "if this run fails, disable the schedule until the issue is fixed"

triggers:
  - id: stop_after_failed
    type: io.kestra.plugin.core.trigger.Schedule
    cron: "0 9 * * *"
    stopAfter:
      - FAILED
cron string required

A standard unix cron expression with 5 fields (minutes precision). Using withSeconds: true you can switch to 6 fields and a seconds precision. Can also be a cron extension / nickname:

  • @yearly
  • @annually
  • @monthly
  • @weekly
  • @daily
  • @midnight
  • @hourly
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.trigger.Schedule" required
Constant: "io.kestra.plugin.core.trigger.Schedule"
backfill object

This property is deprecated and will be removed in the future. Instead, you can now go to the Triggers tab and start a highly customizable backfill process directly from the UI. This will allow you to backfill missed scheduled executions by providing a specific date range and custom labels. Read more about it in the Backfill documentation.

conditions array
description string
disabled boolean

Default value is : false

Default: false
inputs object
lateMaximumDelay string

If the scheduled execution didn't start after this delay (e.g. due to infrastructure issues), the execution will be skipped.

format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
recoverMissedSchedules string

ALL will recover all missed schedules, LAST will only recover the most recently missed one, and NONE will not recover any missed schedule. The default is ALL unless a different value is configured using the global plugin configuration.

Values: "LAST" "NONE" "ALL"
scheduleConditions array

List of schedule conditions in order to limit the schedule trigger date.

stopAfter string[]
timezone string

Default value is : Etc/UTC

Default: "Etc/UTC"
withSeconds boolean

By default, the cron expression has 5 fields; setting this property to true will allow a 6th field for seconds precision.

Default value is : false

Default: false
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.trigger.ScheduleOnDates object
dates string[] required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.core.trigger.ScheduleOnDates" required
Constant: "io.kestra.plugin.core.trigger.ScheduleOnDates"
conditions array
description string
disabled boolean

Default value is : false

Default: false
inputs object
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
recoverMissedSchedules string

ALL will recover all missed schedules, LAST will only recover the most recently missed one, and NONE will not recover any missed schedule. The default is ALL unless a different value is configured using the global plugin configuration.

Values: "LAST" "NONE" "ALL"
stopAfter string[]
timezone string

Default value is : Etc/UTC

Default: "Etc/UTC"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.trigger.Toggle object
Examples

Toggle a trigger on flow input.

id: trigger_toggle
namespace: company.team

inputs:
  - id: toggle
    type: BOOLEAN
    defaults: true

tasks:
  - id: if
    type: io.kestra.plugin.core.flow.If
    condition: "{{inputs.toggle}}"
    then:
      - id: enable
        type: io.kestra.plugin.core.trigger.Toggle
        trigger: schedule
        enabled: true
    else:
      - id: disable
        type: io.kestra.plugin.core.trigger.Toggle
        trigger: schedule
        enabled: false
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: Hello World

triggers:
  - id: schedule
    type: io.kestra.plugin.core.trigger.Schedule
    cron: "* * * * *"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
trigger string required
type const: "io.kestra.plugin.core.trigger.Toggle" required
Constant: "io.kestra.plugin.core.trigger.Toggle"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
enabled boolean

Default value is : false

Default: false
flowId string

If not set, the current flow identifier will be used.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespace string

If not set, the current flow namespace will be used.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.core.trigger.Webhook object

Webhook trigger allows you to create a unique URL that you can use to trigger a Kestra flow execution based on events in another application such as GitHub or Amazon EventBridge. In order to use that URL, you have to add a secret key that will secure your webhook URL.

The URL will then follow the following format: https://{your_hostname}/api/v1/executions/webhook/{namespace}/{flowId}/{key}. Replace the templated values according to your workflow setup.

The webhook URL accepts GET, POST and PUT requests.

You can access the request body and headers sent by another application using the following template variables:

  • {{ trigger.body }}
  • {{ trigger.headers }}.

The webhook response will be one of the following HTTP status codes:

  • 404 if the namespace, flow or webhook key is not found.
  • 200 if the webhook triggers an execution.
  • 204 if the webhook cannot trigger an execution due to a lack of matching event conditions sent by the other application.

A webhook trigger can have conditions, but it doesn't support conditions of type MultipleCondition.

Examples

Add a webhook trigger to the current flow with the key 4wjtkzwVGBM9yKnjm3yv8r, the webhook will be available at the URI /api/v1/executions/webhook/{namespace}/{flowId}/4wjtkzwVGBM9yKnjm3yv8r.

id: webhook_flow
namespace: company.team

tasks:
  - id: log_hello_world
    type: io.kestra.plugin.core.log.Log
    message: Hello World! 🚀

triggers:
  - id: webhook
    type: io.kestra.plugin.core.trigger.Webhook
    key: 4wjtkzwVGBM9yKnjm3yv8r

Add a trigger matching specific webhook event condition. The flow will be executed only if the condition is met.

id: condition_based_webhook_flow
namespace: company.team

tasks:
  - id: log_hello_world
    type: io.kestra.plugin.core.log.Log
    message: Hello World! 🚀

triggers:
  - id: webhook
    type: io.kestra.plugin.core.trigger.Webhook
    key: 4wjtkzwVGBM9yKnjm3yv8r
    conditions:
      - type: io.kestra.plugin.core.condition.ExpressionCondition
        expression: "{{ trigger.body.hello == 'world' }}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
key string required

The key is used for generating the URL of the webhook.

::alert{type="warning"} Make sure to keep the webhook key secure. It's the only security mechanism to protect your endpoint from bad actors, and must be considered as a secret. You can use a random key generator to create the key. ::

maxLength=256
type const: "io.kestra.plugin.core.trigger.Webhook" required
Constant: "io.kestra.plugin.core.trigger.Webhook"
conditions array
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
stopAfter string[]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.couchbase.Query object
Examples

Send a N1QL query to a Couchbase database.

connectionString: couchbase://localhost
username: couchbase_user
password: couchbase_passwd
query: SELECT * FROM `COUCHBASE_BUCKET`(.`COUCHBASE_SCOPE`.`COUCHBASE_COLLECTION`)
fetchType: FETCH
connectionString string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
password string required
minLength=1
query string required
minLength=1
type const: "io.kestra.plugin.couchbase.Query" required
Constant: "io.kestra.plugin.couchbase.Query"
username string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
fetchType string

FETCH_ONE - output just the first row. FETCH - output all the rows. STORE - store all the rows in a file. NONE - do nothing.

Default value is : STORE

Default: "STORE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
parameters Record<string, string>

See Couchbase documentation about Prepared Statements for query syntax. This should be supplied with a parameter map if using named parameters, or an array for positional ones.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.couchbase.Trigger object
Examples

Wait for a N1QL query to return results, and then iterate through rows.

id: couchbase-trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.couchbase.Trigger
    interval: "PT5M"
    connectionString: couchbase://localhost
    username: couchbase_user
    password: couchbase_passwd
    query: SELECT * FROM `COUCHBASE_BUCKET`(.`COUCHBASE_SCOPE`.`COUCHBASE_COLLECTION`)
    fetchType: FETCH
connectionString string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
password string required
minLength=1
query string required
minLength=1
type const: "io.kestra.plugin.couchbase.Trigger" required
Constant: "io.kestra.plugin.couchbase.Trigger"
username string required
minLength=1
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetchType string

FETCH_ONE - output just the first row. FETCH - output all the rows. STORE - store all the rows in a file. NONE - do nothing.

Default value is : STORE

Default: "STORE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between 2 different polls of schedule; this can avoid overloading the remote system with too many calls. For most of the triggers that depend on external systems, the minimal interval should be at least PT30S. See ISO_8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
parameters Record<string, string>

See Couchbase documentation about Prepared Statements for query syntax. This should be supplied with a parameter map if using named parameters, or an array for positional ones.

stopAfter string[]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.crypto.openpgp.Decrypt object
Examples

Decrypt a file

id: crypto_decrypt
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: decrypt
    type: io.kestra.plugin.crypto.openpgp.Decrypt
    from: "{{ inputs.file }}"
    privateKey: |
      -----BEGIN PGP PRIVATE KEY BLOCK-----
    privateKeyPassphrase: my-passphrase

Decrypt a file and verify signature

id: crypto_decrypt
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: decrypt
    type: io.kestra.plugin.crypto.openpgp.Decrypt
    from: "{{ inputs.file }}"
    privateKey: |
      -----BEGIN PGP PRIVATE KEY BLOCK-----
    privateKeyPassphrase: my-passphrase
    signUsersKey:
      - |
        -----BEGIN PGP PRIVATE KEY BLOCK-----
    requiredSignerUsers:
      - [email protected]

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.crypto.openpgp.Decrypt" required
Constant: "io.kestra.plugin.crypto.openpgp.Decrypt"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
from string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
privateKey string

Must be an ascii key export with gpg --export-secret-key -a

privateKeyPassphrase string
requiredSignerUsers string[]
signUsersKey string[]

Must be an ascii key export with gpg --export -a

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.crypto.openpgp.Encrypt object
Examples

Encrypt a file not signed

id: crypto_encrypt
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: encrypt
    type: io.kestra.plugin.crypto.openpgp.Encrypt
    from: "{{ inputs.file }}"
    key: |
      -----BEGIN PGP PUBLIC KEY BLOCK----- ...
    recipients:
      - [email protected]

Encrypt a file signed

id: crypto_encrypt
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: encrypt
    type: io.kestra.plugin.crypto.openpgp.Encrypt
    from: "{{ inputs.file }}"
    key: |
      -----BEGIN PGP PUBLIC KEY BLOCK----- ...
    recipients:
      - [email protected]
    signPublicKey: |
      -----BEGIN PGP PUBLIC KEY BLOCK----- ...
    signPrivateKey: |
      -----BEGIN PGP PRIVATE KEY BLOCK-----
    signPassphrase: my-passphrase
    signUser: [email protected]

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
recipients string[] required
type const: "io.kestra.plugin.crypto.openpgp.Encrypt" required
Constant: "io.kestra.plugin.crypto.openpgp.Encrypt"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
from string
key string

Must be an ascii key export with gpg --export -a

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
signPassphrase string
signPrivateKey string

Must be an ascii key export with gpg --export -a

signPublicKey string

Must be an ascii key export with gpg --export -a

signUser string

If you want to sign the file, you need to provide a privateKey

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.databricks.AbstractTask-AuthenticationConfig object
authType string
azureClientId string
azureClientSecret string
azureTenantId string
clientId string
clientSecret string
googleCredentials string
googleServiceAccount string
password string
token string
username string
io.kestra.plugin.databricks.cluster.CreateCluster object
Examples

Create a Databricks cluster with one worker.

id: databricks_create_cluster
namespace: company.team

tasks:
  - id: create_cluster
    type: io.kestra.plugin.databricks.cluster.CreateCluster
    authentication:
      token: <your-token>
    host: <your-host>
    clusterName: kestra-demo
    nodeTypeId: n2-highmem-4
    numWorkers: 1
    sparkVersion: 13.0.x-scala2.12

clusterName string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
sparkVersion string required
type const: "io.kestra.plugin.databricks.cluster.CreateCluster" required
Constant: "io.kestra.plugin.databricks.cluster.CreateCluster"
accountId string
allowFailure boolean

Default value is : false

Default: false
authentication
All of: io.kestra.plugin.databricks.AbstractTask-AuthenticationConfig object, Databricks authentication configuration.
autoTerminationMinutes integer
configFile string
description string
disabled boolean

Default value is : false

Default: false
host string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxWorkers integer

Use this property along with minWorkers to use autoscaling. Otherwise, set a fixed number of workers using numWorkers.

minWorkers integer

Use this property along with maxWorkers for autoscaling. Otherwise, set a fixed number of workers using numWorkers.

nodeTypeId string
numWorkers integer

You must set this property unless you use the minWorkers and maxWorkers properties for autoscaling.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.databricks.cluster.DeleteCluster object
Examples

Delete a Databricks cluster.

id: databricks_delete_cluster
namespace: company.team

tasks:
  - id: delete_cluster
    type: io.kestra.plugin.databricks.cluster.DeleteCluster
    authentication:
      token: <your-token>
    host: <your-host>
    clusterId: <your-cluster>

clusterId string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.databricks.cluster.DeleteCluster" required
Constant: "io.kestra.plugin.databricks.cluster.DeleteCluster"
accountId string
allowFailure boolean

Default value is : false

Default: false
authentication
All of: io.kestra.plugin.databricks.AbstractTask-AuthenticationConfig object, Databricks authentication configuration.
configFile string
description string
disabled boolean

Default value is : false

Default: false
host string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.databricks.dbfs.Download object

The file can be of any size. The task will download the file in chunks of 1MB.

Examples

Download a file from the Databricks File System.

id: databricks_dbfs_download
namespace: company.team

tasks:
  - id: download_file
    type: io.kestra.plugin.databricks.dbfs.Download
    authentication:
      token: <your-token>
    host: <your-host>
    from: /Share/myFile.txt

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.databricks.dbfs.Download" required
Constant: "io.kestra.plugin.databricks.dbfs.Download"
accountId string
allowFailure boolean

Default value is : false

Default: false
authentication
All of: io.kestra.plugin.databricks.AbstractTask-AuthenticationConfig object, Databricks authentication configuration.
configFile string
description string
disabled boolean

Default value is : false

Default: false
host string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.databricks.dbfs.Upload object

The file can be of any size. The task will upload the file in chunks of 1MB.

Examples

Upload a file to the Databricks File System.

id: databricks_dbfs_upload
namespace: company.team

inputs:
  - id: file
    type: FILE
    description: File to be uploaded to DBFS

tasks:
  - id: upload_file
    type: io.kestra.plugin.databricks.dbfs.Upload
    authentication:
      token: <your-token>
    host: <your-host>
    from: "{{ inputs.file }}"
    to: /Share/myFile.txt

from string required

Must be a file from Kestra internal storage.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
to string required
type const: "io.kestra.plugin.databricks.dbfs.Upload" required
Constant: "io.kestra.plugin.databricks.dbfs.Upload"
accountId string
allowFailure boolean

Default value is : false

Default: false
authentication
All of: io.kestra.plugin.databricks.AbstractTask-AuthenticationConfig object, Databricks authentication configuration.
configFile string
description string
disabled boolean

Default value is : false

Default: false
host string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.databricks.job.CreateJob object
Examples

Create a Databricks job, run it, and wait for completion for five minutes.

id: databricks_job_create
namespace: company.team

tasks:
  - id: create_job
    type: io.kestra.plugin.databricks.job.CreateJob
    authentication:
      token: <your-token>
    host: <your-host>
    jobTasks:
      - existingClusterId: <your-cluster>
        taskKey: taskKey
        sparkPythonTask:
          pythonFile: /Shared/hello.py
          sparkPythonTaskSource: WORKSPACE
    waitForCompletion: PT5M

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
jobTasks array required
minItems=1
type const: "io.kestra.plugin.databricks.job.CreateJob" required
Constant: "io.kestra.plugin.databricks.job.CreateJob"
accountId string
allowFailure boolean

Default value is : false

Default: false
authentication
All of: io.kestra.plugin.databricks.AbstractTask-AuthenticationConfig object, Databricks authentication configuration.
configFile string
description string
disabled boolean

Default value is : false

Default: false
host string
jobName string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
waitForCompletion string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.databricks.job.CreateJob-JobTaskSetting object
dbtTask
All of: io.kestra.plugin.databricks.job.task.DbtTaskSetting object, DBT task settings.
dependsOn string[]
description string
existingClusterId string
libraries array
notebookTask
All of: io.kestra.plugin.databricks.job.task.NotebookTaskSetting object, Notebook task settings.
pipelineTask
All of: io.kestra.plugin.databricks.job.task.PipelineTaskSetting object, Pipeline task settings.
pythonWheelTask
All of: io.kestra.plugin.databricks.job.task.PythonWheelTaskSetting object, Python Wheel task settings.
sparkJarTask
All of: io.kestra.plugin.databricks.job.task.SparkJarTaskSetting object, Spark JAR task settings.
sparkPythonTask
All of: io.kestra.plugin.databricks.job.task.SparkPythonTaskSetting object, Spark Python task settings.
sparkSubmitTask
All of: io.kestra.plugin.databricks.job.task.SparkSubmitTaskSetting object, Spark Submit task settings.
sqlTask
All of: io.kestra.plugin.databricks.job.task.SqlTaskSetting object, SQL task settings.
taskKey string
timeoutSeconds integer
io.kestra.plugin.databricks.job.SubmitRun object
Examples

Submit a Databricks run and wait up to 5 minutes for its completion.

id: databricks_job_submit_run
namespace: company.team

tasks:
  - id: submit_run
    type: io.kestra.plugin.databricks.job.SubmitRun
    authentication:
      token: <your-token>
    host: <your-host>
    runTasks:
      - existingClusterId: <your-cluster>
        taskKey: taskKey
        sparkPythonTask:
          pythonFile: /Shared/hello.py
          sparkPythonTaskSource: WORKSPACE
    waitForCompletion: PT5M

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
runTasks array required
minItems=1
type const: "io.kestra.plugin.databricks.job.SubmitRun" required
Constant: "io.kestra.plugin.databricks.job.SubmitRun"
accountId string
allowFailure boolean

Default value is : false

Default: false
authentication
All of: io.kestra.plugin.databricks.AbstractTask-AuthenticationConfig object, Databricks authentication configuration.
configFile string
description string
disabled boolean

Default value is : false

Default: false
host string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
runName string
timeout string
format=duration
waitForCompletion string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.databricks.job.SubmitRun-RunSubmitTaskSetting object
dependsOn string[]
existingClusterId string
libraries array
notebookTask
All of: io.kestra.plugin.databricks.job.task.NotebookTaskSetting object, Notebook task settings.
pipelineTask
All of: io.kestra.plugin.databricks.job.task.PipelineTaskSetting object, Pipeline task settings.
pythonWheelTask
All of: io.kestra.plugin.databricks.job.task.PythonWheelTaskSetting object, Python Wheel task settings.
sparkJarTask
All of: io.kestra.plugin.databricks.job.task.SparkJarTaskSetting object, Spark JAR task settings.
sparkPythonTask
All of: io.kestra.plugin.databricks.job.task.SparkPythonTaskSetting object, Spark Python task settings.
sparkSubmitTask
All of: io.kestra.plugin.databricks.job.task.SparkSubmitTaskSetting object, Spark Submit task settings.
taskKey string
timeoutSeconds integer
io.kestra.plugin.databricks.job.task.DbtTaskSetting object
catalog string
commands string[]
schema string
warehouseId string
io.kestra.plugin.databricks.job.task.LibrarySetting object
io.kestra.plugin.databricks.job.task.LibrarySetting-CranSetting object
_package string
repo string
io.kestra.plugin.databricks.job.task.LibrarySetting-MavenSetting object
coordinates string
exclusions string[]
repo string
io.kestra.plugin.databricks.job.task.LibrarySetting-PypiSetting object
_package string
repo string
io.kestra.plugin.databricks.job.task.NotebookTaskSetting object
baseParameters Record<string, string>

Can be a map of string/string or a variable that binds to a JSON object.

notebookPath string
source string
Values: "GIT" "WORKSPACE"
io.kestra.plugin.databricks.job.task.PipelineTaskSetting object
fullRefresh boolean
pipelineId string
io.kestra.plugin.databricks.job.task.PythonWheelTaskSetting object
entryPoint string
namedParameters Record<string, string>

Can be a map of string/string or a variable that binds to a JSON object.

packageName string
parameters string | string[]

Can be a list of strings or a variable that binds to a JSON array of strings.

io.kestra.plugin.databricks.job.task.SparkJarTaskSetting object
jarUri string
mainClassName string
parameters string | string[]

Can be a list of strings or a variable that binds to a JSON array of strings.

io.kestra.plugin.databricks.job.task.SparkPythonTaskSetting object
pythonFile string required
sparkPythonTaskSource string required
Values: "GIT" "WORKSPACE"
parameters string | string[]

Can be a list of strings or a variable that binds to a JSON array of strings.

io.kestra.plugin.databricks.job.task.SparkSubmitTaskSetting object
parameters string | string[]

Can be a list of strings or a variable that binds to a JSON array of strings.

io.kestra.plugin.databricks.job.task.SqlTaskSetting object
parameters Record<string, string>

Can be a map of string/string or a variable that binds to a JSON object.

queryId string
warehouseId string
io.kestra.plugin.databricks.sql.Query object

See Retrieve the connection details in the Databricks documentation to discover how to retrieve the needed configuration properties. We're using the Databricks JDBC driver to execute a Query, see https://docs.databricks.com/integrations/jdbc-odbc-bi.html#jdbc-driver-capabilities for its capabilities.

Due to current limitation of the JDBC driver with Java 21, Arrow is disabled, performance may be impacted, see here and here from Databricks status on Java 21 support.

Examples
id: databricks_sql_query
namespace: company.team

tasks:
  - id: sql_query
    type: io.kestra.plugin.databricks.sql.Query
    accessToken: <your-accessToken>
    host: <your-host>
    httpPath: <your-httpPath>
    sql: SELECT 1

host string required
httpPath string required

To retrieve the HTTP Path, go to your Databricks cluster, click on Advanced options then, click on JDBC/ODBC. See Retrieve the connection details for more details.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
sql string required
type const: "io.kestra.plugin.databricks.sql.Query" required
Constant: "io.kestra.plugin.databricks.sql.Query"
accessToken string
allowFailure boolean

Default value is : false

Default: false
catalog string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
properties object
schema string
timeZoneId string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dataform.cli.DataformCLI object
Examples

Compile and run a Dataform project from Git

id: dataform
namespace: company.team
tasks:
  - id: wdir
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repo
        type: io.kestra.plugin.git.Clone
        url: https://github.com/dataform-co/dataform-example-project-bigquery

      - id: transform
        type: io.kestra.plugin.dataform.cli.DataformCLI
        beforeCommands:
          - dataform compile
        commands:
          - dataform run --dry-run

commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.dataform.cli.DataformCLI" required
Constant: "io.kestra.plugin.dataform.cli.DataformCLI"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : dataformco/dataform:latest

Default: "dataformco/dataform:latest"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
env Record<string, string>
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dbt.cli.Build object
Examples

Invoke dbt build command.

id: dbt_build
namespace: company.team

tasks:
  - id: working_directory
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/dbt-demo
        branch: main

      - id: dbt_build
        type: io.kestra.plugin.dbt.cli.Build
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
        dbtPath: /usr/local/bin/dbt
        containerImage: ghcr.io/kestra-io/dbt-duckdb
        profiles: |
          jaffle_shop:
            outputs:
              dev:
                type: duckdb
                path: ':memory:'
                extensions:
                  - parquet
            target: dev

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.dbt.cli.Build" required
Constant: "io.kestra.plugin.dbt.cli.Build"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : ghcr.io/kestra-io/dbt

Default: "ghcr.io/kestra-io/dbt"
dbtPath string

Default value is : ./bin/dbt

Default: "./bin/dbt"
debug boolean | string
description string
disabled boolean

Default value is : false

Default: false
docker string
dockerOptions string
env object
exclude string[]
failFast boolean | string
fullRefresh boolean | string
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

parseRunResults boolean | string
profiles string

If a profiles.yml file already exists in the current working directory, it will be overridden.

projectDir string

Default is the current working directory and its parents.

runner string

Deprecated, use 'taskRunner' instead.

Values: "PROCESS" "DOCKER"
select string[]
selector string
target string
taskRunner
thread integer | string
timeout string
format=duration
warnError boolean | string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dbt.cli.Compile object
Examples

Invoke dbt compile command.

id: dbt_compile
namespace: company.team

tasks:
  - id: working_directory
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/dbt-demo
        branch: main

      - id: dbt_compile
        type: io.kestra.plugin.dbt.cli.Compile
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
        dbtPath: /usr/local/bin/dbt
        containerImage: ghcr.io/kestra-io/dbt-duckdb
        profiles: |
          jaffle_shop:
            outputs:
              dev:
                type: duckdb
                path: ':memory:'
                extensions:
                  - parquet
            target: dev

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.dbt.cli.Compile" required
Constant: "io.kestra.plugin.dbt.cli.Compile"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : ghcr.io/kestra-io/dbt

Default: "ghcr.io/kestra-io/dbt"
dbtPath string

Default value is : ./bin/dbt

Default: "./bin/dbt"
debug boolean | string
description string
disabled boolean

Default value is : false

Default: false
docker string
dockerOptions string
env object
exclude string[]
failFast boolean | string
fullRefresh boolean | string
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

parseRunResults boolean | string
profiles string

If a profiles.yml file already exists in the current working directory, it will be overridden.

projectDir string

Default is the current working directory and its parents.

runner string

Deprecated, use 'taskRunner' instead.

Values: "PROCESS" "DOCKER"
select string[]
selector string
target string
taskRunner
thread integer | string
timeout string
format=duration
warnError boolean | string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dbt.cli.DbtCLI object
Examples

Launch a dbt build command on a sample dbt project hosted on GitHub.

id: dbt_build
namespace: company.team

tasks:
  - id: dbt
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: cloneRepository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/dbt-example
        branch: main

      - id: dbt-build
        type: io.kestra.plugin.dbt.cli.DbtCLI
        containerImage: ghcr.io/kestra-io/dbt-duckdb:latest
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
        commands:
          - dbt build
        profiles: |
          my_dbt_project:
            outputs:
              dev:
                type: duckdb
                path: ":memory:"
            target: dev

Install a custom dbt version and run dbt deps and dbt build commands. Note how you can also configure the memory limit for the Docker runner. This is useful when you see Zombie processes.

id: dbt_custom_dependencies
namespace: company.team

inputs:
  - id: dbt_version
    type: STRING
    defaults: "dbt-duckdb==1.6.0"

tasks:
  - id: git
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/dbt-example
        branch: main

      - id: dbt
        type: io.kestra.plugin.dbt.cli.DbtCLI
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
          memory:
            memory: 1GB
        containerImage: python:3.11-slim
        beforeCommands:
          - pip install uv
          - uv venv --quiet
          - . .venv/bin/activate --quiet
          - uv pip install --quiet {{ inputs.dbt_version }}
        commands:
          - dbt deps
          - dbt build
        profiles: |
          my_dbt_project:
            outputs:
              dev:
                type: duckdb
                path: ":memory:"
                fixed_retries: 1
                threads: 16
                timeout_seconds: 300
            target: dev

Clone a Git repository and build dbt models. Note that, as the dbt project files are in a separate directory, you need to set the projectDir task property and use --project-dir in each dbt CLI command.

id: dwh_and_analytics
namespace: company.team

tasks:
  - id: dbt
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
    - id: clone_repository
      type: io.kestra.plugin.git.Clone
      url: https://github.com/kestra-io/dbt-example
      branch: master

    - id: dbt_build
      type: io.kestra.plugin.dbt.cli.DbtCLI
      taskRunner:
        type: io.kestra.plugin.scripts.runner.docker.Docker
      containerImage: ghcr.io/kestra-io/dbt-duckdb:latest
      commands:
        - dbt deps --project-dir dbt --target prod
        - dbt build --project-dir dbt --target prod
      projectDir: dbt
      profiles: |
        my_dbt_project:
          outputs:
            dev:
              type: duckdb
              path: dbt.duckdb
              extensions:
                - parquet
              fixed_retries: 1
              threads: 16
              timeout_seconds: 300
            prod:
              type: duckdb
              path: dbt2.duckdb
              extensions: 
                - parquet
              fixed_retries: 1
              threads: 16
              timeout_seconds: 300
          target: dev
commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.dbt.cli.DbtCLI" required
Constant: "io.kestra.plugin.dbt.cli.DbtCLI"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : ghcr.io/kestra-io/dbt

Default: "ghcr.io/kestra-io/dbt"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false, all commands will be executed one after the other. The final state of the task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the `set -e` option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

parseRunResults boolean | string
profiles string

If a profiles.yml file already exists in the current working directory, it will be overridden.

projectDir string

To use it, also use this directory in the --project-dir flag on the dbt CLI commands.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dbt.cli.Deps object
Examples

Invoke dbt deps command

id: dbt_deps
namespace: company.team

tasks:
  - id: working_directory
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/dbt-demo
        branch: main

      - id: dbt_deps
        type: io.kestra.plugin.dbt.cli.Deps
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
        dbtPath: /usr/local/bin/dbt
        containerImage: ghcr.io/kestra-io/dbt-duckdb
        profiles: |
          jaffle_shop:
            outputs:
              dev:
                type: duckdb
                path: ':memory:'
                extensions:
                  - parquet
            target: dev

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.dbt.cli.Deps" required
Constant: "io.kestra.plugin.dbt.cli.Deps"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : ghcr.io/kestra-io/dbt

Default: "ghcr.io/kestra-io/dbt"
dbtPath string

Default value is : ./bin/dbt

Default: "./bin/dbt"
debug boolean | string
description string
disabled boolean

Default value is : false

Default: false
docker string
dockerOptions string
env object
exclude string[]
failFast boolean | string
fullRefresh boolean | string
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

parseRunResults boolean | string
profiles string

If a profiles.yml file already exists in the current working directory, it will be overridden.

projectDir string

Default is the current working directory and its parents.

runner string

Deprecated, use 'taskRunner' instead.

Values: "PROCESS" "DOCKER"
select string[]
selector string
target string
taskRunner
thread integer | string
timeout string
format=duration
warnError boolean | string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dbt.cli.Freshness object
Examples

Invoke dbt source freshness command.

id: dbt_freshness
namespace: company.team

tasks:
  - id: working_directory
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/dbt-demo
        branch: main

      - id: dbt_freshness
        type: io.kestra.plugin.dbt.cli.Freshness
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
        dbtPath: /usr/local/bin/dbt
        containerImage: ghcr.io/kestra-io/dbt-duckdb
        profiles: |
          jaffle_shop:
            outputs:
              dev:
                type: duckdb
                path: ':memory:'
                extensions:
                  - parquet
            target: dev

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.dbt.cli.Freshness" required
Constant: "io.kestra.plugin.dbt.cli.Freshness"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : ghcr.io/kestra-io/dbt

Default: "ghcr.io/kestra-io/dbt"
dbtPath string

Default value is : ./bin/dbt

Default: "./bin/dbt"
debug boolean | string
description string
disabled boolean

Default value is : false

Default: false
docker string
dockerOptions string
env object
exclude string[]
failFast boolean | string
fullRefresh boolean | string
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

parseRunResults boolean | string
profiles string

If a profiles.yml file already exists in the current working directory, it will be overridden.

projectDir string

Default is the current working directory and its parents.

runner string

Deprecated, use 'taskRunner' instead.

Values: "PROCESS" "DOCKER"
select string[]
selector string
target string
taskRunner
thread integer | string
timeout string
format=duration
warnError boolean | string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dbt.cli.List object
Examples

Invoke dbt list command.

id: dbt_list
namespace: company.team

tasks:
  - id: working_directory
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/dbt-demo
        branch: main

      - id: dbt_list
        type: io.kestra.plugin.dbt.cli.List
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
        dbtPath: /usr/local/bin/dbt
        containerImage: ghcr.io/kestra-io/dbt-duckdb
        profiles: |
          jaffle_shop:
            outputs:
              dev:
                type: duckdb
                path: ':memory:'
                extensions:
                  - parquet
            target: dev

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.dbt.cli.List" required
Constant: "io.kestra.plugin.dbt.cli.List"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : ghcr.io/kestra-io/dbt

Default: "ghcr.io/kestra-io/dbt"
dbtPath string

Default value is : ./bin/dbt

Default: "./bin/dbt"
debug boolean | string
description string
disabled boolean

Default value is : false

Default: false
docker string
dockerOptions string
env object
exclude string[]
failFast boolean | string
fullRefresh boolean | string
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

parseRunResults boolean | string
profiles string

If a profiles.yml file already exists in the current working directory, it will be overridden.

projectDir string

Default is the current working directory and its parents.

runner string

Deprecated, use 'taskRunner' instead.

Values: "PROCESS" "DOCKER"
select string[]
selector string
target string
taskRunner
thread integer | string
timeout string
format=duration
warnError boolean | string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dbt.cli.Run object
Examples

Invoke dbt run command.

id: dbt_run
namespace: company.team

tasks:
  - id: working_directory
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/dbt-demo
        branch: main

      - id: dbt_run
        type: io.kestra.plugin.dbt.cli.Run
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
        dbtPath: /usr/local/bin/dbt
        containerImage: ghcr.io/kestra-io/dbt-duckdb
        profiles: |
          jaffle_shop:
            outputs:
              dev:
                type: duckdb
                path: ':memory:'
                extensions:
                  - parquet
            target: dev

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.dbt.cli.Run" required
Constant: "io.kestra.plugin.dbt.cli.Run"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : ghcr.io/kestra-io/dbt

Default: "ghcr.io/kestra-io/dbt"
dbtPath string

Default value is : ./bin/dbt

Default: "./bin/dbt"
debug boolean | string
description string
disabled boolean

Default value is : false

Default: false
docker string
dockerOptions string
env object
exclude string[]
failFast boolean | string
fullRefresh boolean | string
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

parseRunResults boolean | string
profiles string

If a profiles.yml file already exists in the current working directory, it will be overridden.

projectDir string

Default is the current working directory and its parents.

runner string

Deprecated, use 'taskRunner' instead.

Values: "PROCESS" "DOCKER"
select string[]
selector string
target string
taskRunner
thread integer | string
timeout string
format=duration
warnError boolean | string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dbt.cli.Seed object
Examples

Invoke dbt seed command.

id: dbt_seed
namespace: company.team

tasks:
  - id: working_directory
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/dbt-demo
        branch: main

      - id: dbt_seed
        type: io.kestra.plugin.dbt.cli.Seed
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
        dbtPath: /usr/local/bin/dbt
        containerImage: ghcr.io/kestra-io/dbt-duckdb
        profiles: |
          jaffle_shop:
            outputs:
              dev:
                type: duckdb
                path: ':memory:'
                extensions:
                  - parquet
            target: dev

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.dbt.cli.Seed" required
Constant: "io.kestra.plugin.dbt.cli.Seed"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : ghcr.io/kestra-io/dbt

Default: "ghcr.io/kestra-io/dbt"
dbtPath string

Default value is : ./bin/dbt

Default: "./bin/dbt"
debug boolean | string
description string
disabled boolean

Default value is : false

Default: false
docker string
dockerOptions string
env object
exclude string[]
failFast boolean | string
fullRefresh boolean | string
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

parseRunResults boolean | string
profiles string

If a profiles.yml file already exists in the current working directory, it will be overridden.

projectDir string

Default is the current working directory and its parents.

runner string

Deprecated, use 'taskRunner' instead.

Values: "PROCESS" "DOCKER"
select string[]
selector string
target string
taskRunner
thread integer | string
timeout string
format=duration
warnError boolean | string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dbt.cli.Setup object

Use it to install dbt requirements locally in a Python virtualenv if you don't want to use dbt via Docker. In this case, you need to use a WorkingDirectory task together with this Setup task to set up dbt prior to using any of the dbt tasks.

Examples

Setup dbt by installing pip dependencies in a Python virtualenv and initializing the profile directory.

id: dbt_setup
namespace: company.team

tasks:
  - id: working_directory
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/dbt-demo
        branch: main

      - id: dbt_setup
        type: io.kestra.plugin.dbt.cli.Setup
        requirements:
          - dbt-duckdb
        profiles:
          jaffle_shop:
            outputs:
              dev:
                type: duckdb
                path: ':memory:'
                extensions:
                  - parquet
            target: dev

      - id: dbt_build
        type: io.kestra.plugin.dbt.cli.Build

exitOnFailed boolean | string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
profiles object | string required
requirements string[] required

Python dependencies list to set up in the virtualenv, in the same format as requirements.txt. It must at least provide dbt.

type const: "io.kestra.plugin.dbt.cli.Setup" required
Constant: "io.kestra.plugin.dbt.cli.Setup"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : python

Default: "python"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
dockerOptions string
env Record<string, string>
failFast boolean

If set to false, all commands will be executed one after the other. The final state of the task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the `set -e` option.

Default value is : true

Default: true
inputFiles

You can define the files as map or a JSON string. Each file can be defined inlined or can reference a file from Kestra's internal storage.

interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

pythonPath string

Set the python interpreter path to use.

Default value is : python

Default: "python"
minLength=1
runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dbt.cli.Snapshot object
Examples

Invoke dbt snapshot command.

id: dbt_snapshot
namespace: company.team

tasks:
  - id: working_directory
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/dbt-demo
        branch: main

      - id: dbt_snapshot
        type: io.kestra.plugin.dbt.cli.Snapshot
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
        dbtPath: /usr/local/bin/dbt
        containerImage: ghcr.io/kestra-io/dbt-duckdb
        profiles: |
          jaffle_shop:
            outputs:
              dev:
                type: duckdb
                path: ':memory:'
                extensions:
                  - parquet
            target: dev

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.dbt.cli.Snapshot" required
Constant: "io.kestra.plugin.dbt.cli.Snapshot"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : ghcr.io/kestra-io/dbt

Default: "ghcr.io/kestra-io/dbt"
dbtPath string

Default value is : ./bin/dbt

Default: "./bin/dbt"
debug boolean | string
description string
disabled boolean

Default value is : false

Default: false
docker string
dockerOptions string
env object
exclude string[]
failFast boolean | string
fullRefresh boolean | string
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

parseRunResults boolean | string
profiles string

If a profiles.yml file already exists in the current working directory, it will be overridden.

projectDir string

Default is the current working directory and its parents.

runner string

Deprecated, use 'taskRunner' instead.

Values: "PROCESS" "DOCKER"
select string[]
selector string
target string
taskRunner
thread integer | string
timeout string
format=duration
warnError boolean | string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dbt.cli.Test object
Examples

Invoke dbt test command.

id: dbt_test
namespace: company.team

tasks:
  - id: wdir
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/dbt-example
        branch: main

      - id: dbt_test
        type: io.kestra.plugin.dbt.cli.Test
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
        dbtPath: /usr/local/bin/dbt
        containerImage: ghcr.io/kestra-io/dbt-duckdb
        profiles: |
          my_dbt_project:
            outputs:
              dev:
                type: duckdb
                path: ':memory:'
            target: dev

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.dbt.cli.Test" required
Constant: "io.kestra.plugin.dbt.cli.Test"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : ghcr.io/kestra-io/dbt

Default: "ghcr.io/kestra-io/dbt"
dbtPath string

Default value is : ./bin/dbt

Default: "./bin/dbt"
debug boolean | string
description string
disabled boolean

Default value is : false

Default: false
docker string
dockerOptions string
env object
exclude string[]
failFast boolean | string
fullRefresh boolean | string
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

parseRunResults boolean | string
profiles string

If a profiles.yml file already exists in the current working directory, it will be overridden.

projectDir string

Default is the current working directory and its parents.

runner string

Deprecated, use 'taskRunner' instead.

Values: "PROCESS" "DOCKER"
select string[]
selector string
target string
taskRunner
thread integer | string
timeout string
format=duration
warnError boolean | string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dbt.cloud.CheckStatus object
Examples
id: dbt_check_status
namespace: company.team

tasks:
  - id: check_status
    type: io.kestra.plugin.dbt.cloud.CheckStatus
    accountId: "dbt_account"
    token: "dbt_token"
    runId: "run_id"

accountId string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
token string required
type const: "io.kestra.plugin.dbt.cloud.CheckStatus" required
Constant: "io.kestra.plugin.dbt.cloud.CheckStatus"
allowFailure boolean

Default value is : false

Default: false
baseUrl string

Default value is : https://cloud.getdbt.com

Default: "https://cloud.getdbt.com"
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string | string
parseRunResults boolean | string
pollFrequency string | string
runId string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.dbt.cloud.TriggerRun object

Use this task to kick off a run for a job. When this endpoint returns a successful response, a new run will be enqueued for the account. If you activate the wait option, it will wait for the job to end and will display all the logs and dynamic tasks.

Examples

id: dbt_trigger_job_run
namespace: company.team

tasks:
  - id: trigger_run
    type: io.kestra.plugin.dbt.cloud.TriggerRun
    accountId: "dbt_account"
    token: "dbt_token"
    jobId: "job_id"

accountId string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
jobId string required
token string required
type const: "io.kestra.plugin.dbt.cloud.TriggerRun" required
Constant: "io.kestra.plugin.dbt.cloud.TriggerRun"
allowFailure boolean

Default value is : false

Default: false
baseUrl string

Default value is : https://cloud.getdbt.com

Default: "https://cloud.getdbt.com"
cause string

Default value is : Triggered by Kestra.

Default: "Triggered by Kestra."
dbtVersionOverride string
description string
disabled boolean

Default value is : false

Default: false
generateDocsOverride boolean | string
gitBranch string
gitSha string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string | string
parseRunResults boolean | string
pollFrequency string | string
schemaOverride string
stepsOverride string[]
targetNameOverride string
threadsOverride string
timeout string
format=duration
timeoutSecondsOverride integer | string
wait boolean | string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.db2.Capture object
Examples
snapshotMode: INITIAL
hostname: 127.0.0.1
port: "50000"
username: db2inst1
password: my_password
database: my_database
maxRecords: 100
database string required
hostname string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.db2.Capture" required
Constant: "io.kestra.plugin.debezium.db2.Capture"
allowFailure boolean

Default value is : false

Default: false
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

maxSnapshotDuration string

It's not a hard limit and is evaluated every second. The properties 'maxRecords', 'maxDuration' and 'maxWait' are evaluated only after the snapshot is done.

Default value is : 3600.000000000

Default: 3600.0
format=duration
maxWait string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

Default value is : 10.000000000

Default: 10.0
format=duration
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
password string
properties object

Any additional configuration properties that is valid for the current driver.

snapshotMode string

Possible settings are:

  • ALWAYS: The connector performs a snapshot every time that it starts.
  • INITIAL: The connector runs a snapshot only when no offsets have been recorded for the logical server name.
  • INITIAL_ONLY: The connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.
  • WHEN_NEEDED: After the connector starts, it performs a snapshot only if it detects one of the following circumstances: 1. It cannot detect any topic offsets. 2. A previously recorded offset specifies a log position that is not available on the server.
  • NO_DATA: The connector captures the structure of all relevant tables, performing all the steps described in the INITIAL, except that it does not create READ events to represent the data set at the point of the connector’s start-up.
  • RECOVERY: Set this option to restore a database schema history topic that is lost or corrupted. After a restart, the connector runs a snapshot that rebuilds the topic from the source tables.

Default value is : INITIAL

Default: "INITIAL"
Values: "ALWAYS" "INITIAL" "INITIAL_ONLY" "WHEN_NEEDED" "NO_DATA" "RECOVERY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.db2.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.debezium.db2.Trigger instead.

Examples

Consume a message from a DB2 database via change data capture in real-time.

id: debezium-db2
namespace: company.team

tasks:
  - id: send_data
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.data }}"

triggers:
  - id: realtime
    type: io.kestra.plugin.debezium.db2.RealtimeTrigger
    hostname: 127.0.0.1
    port: 50000
    username: db2inst1
    password: my_password
    database: my_database

database string required
hostname string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.db2.RealtimeTrigger" required
Constant: "io.kestra.plugin.debezium.db2.RealtimeTrigger"
conditions array
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
offsetsCommitMode string

Possible values are:

  • ON_EACH_BATCH: after each batch of records consumed by this trigger, the offsets will be stored in the KV Store. This avoids any duplicated records being consumed but can be costly if many events are produced.
  • ON_STOP: when this trigger is stopped or killed, the offsets will be stored in the KV Store. This avoids any unnecessary writes to the KV Store, but if the trigger is not stopped gracefully, the KV Store value may not be updated, leading to duplicated records consumption.

Default value is : ON_EACH_BATCH

Default: "ON_EACH_BATCH"
Values: "ON_EACH_BATCH" "ON_STOP"
password string
properties object

Any additional configuration properties that are valid for the current driver.

snapshotMode string

Possible settings are:

  • ALWAYS: The connector performs a snapshot every time that it starts.
  • INITIAL: The connector runs a snapshot only when no offsets have been recorded for the logical server name.
  • INITIAL_ONLY: The connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.
  • WHEN_NEEDED: After the connector starts, it performs a snapshot only if it detects one of the following circumstances: 1. It cannot detect any topic offsets. 2. A previously recorded offset specifies a log position that is not available on the server.
  • NO_DATA: The connector captures the structure of all relevant tables, performing all the steps described in the INITIAL, except that it does not create READ events to represent the data set at the point of the connector’s start-up.
  • RECOVERY: Set this option to restore a database schema history topic that is lost or corrupted. After a restart, the connector runs a snapshot that rebuilds the topic from the source tables.

Default value is : INITIAL

Default: "INITIAL"
Values: "ALWAYS" "INITIAL" "INITIAL_ONLY" "WHEN_NEEDED" "NO_DATA" "RECOVERY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.db2.Trigger object

If you would like to consume each message from change data capture in real-time and create one execution per message, you can use the io.kestra.plugin.debezium.db2.RealtimeTrigger instead.

Examples

snapshotMode: INITIAL
hostname: 127.0.0.1
port: "50000"
username: db2inst1
password: my_password
database: my_database
maxRecords: 100
database string required
hostname string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.db2.Trigger" required
Constant: "io.kestra.plugin.debezium.db2.Trigger"
conditions array
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

maxSnapshotDuration string

It's not a hard limit and is evaluated every second. The properties 'maxRecord', 'maxDuration' and 'maxWait' are evaluated only after the snapshot is done.

Default value is : 3600.000000000

Default: 3600.0
format=duration
maxWait string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

Default value is : 10.000000000

Default: 10.0
format=duration
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
password string
properties object

Any additional configuration properties that are valid for the current driver.

snapshotMode string

Possible settings are:

  • ALWAYS: The connector performs a snapshot every time that it starts.
  • INITIAL: The connector runs a snapshot only when no offsets have been recorded for the logical server name.
  • INITIAL_ONLY: The connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.
  • WHEN_NEEDED: After the connector starts, it performs a snapshot only if it detects one of the following circumstances: 1. It cannot detect any topic offsets. 2. A previously recorded offset specifies a log position that is not available on the server.
  • NO_DATA: The connector captures the structure of all relevant tables, performing all the steps described in the INITIAL, except that it does not create READ events to represent the data set at the point of the connector’s start-up.
  • RECOVERY: Set this option to restore a database schema history topic that is lost or corrupted. After a restart, the connector runs a snapshot that rebuilds the topic from the source tables.

Default value is : INITIAL

Default: "INITIAL"
Values: "ALWAYS" "INITIAL" "INITIAL_ONLY" "WHEN_NEEDED" "NO_DATA" "RECOVERY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.mongodb.Capture object
Examples

Replica set connection

snapshotMode: INITIAL
connectionString: mongodb://mongo_user:[email protected]:27017/?replicaSet=rs0
maxRecords: 100

Sharded connection

snapshotMode: INITIAL
connectionString: mongodb://mongo_user:[email protected]:27017,mongos1.example.com:27017/
maxRecords: 100

Replica set SRV connection

snapshotMode: INITIAL
connectionString: mongodb+srv://mongo_user:[email protected]/?replicaSet=rs0
maxRecords: 100

Sharded SRV connection

snapshotMode: INITIAL
connectionString: mongodb+srv://mongo_user:[email protected]/
maxRecords: 100
connectionString string required
hostname string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.mongodb.Capture" required
Constant: "io.kestra.plugin.debezium.mongodb.Capture"
allowFailure boolean

Default value is : false

Default: false
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedCollections

A list of regular expressions that match the collection namespaces (for example, databaseName.collectionName) of all collections to be excluded.

excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedCollections

A list of regular expressions that match the collection namespaces (for example, databaseName.collectionName) of all collections to be monitored.

includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

maxSnapshotDuration string

It's not a hard limit and is evaluated every second. The properties 'maxRecord', 'maxDuration' and 'maxWait' are evaluated only after the snapshot is done.

Default value is : 3600.000000000

Default: 3600.0
format=duration
maxWait string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

Default value is : 10.000000000

Default: 10.0
format=duration
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
password string
properties object

Any additional configuration properties that are valid for the current driver.

snapshotMode string

Possible settings are:

  • INITIAL: The connector runs a snapshot only when no offsets have been recorded for the logical server name.
  • INITIAL_ONLY: The connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.
  • NO_DATA: The connector captures the structure of all relevant tables, performing all the steps described in the default snapshot workflow, except that it does not create READ events to represent the data set at the point of the connector’s start-up.
  • WHEN_NEEDED: The connector runs a snapshot upon startup whenever it deems it necessary. That is, when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server.

Default value is : INITIAL

Default: "INITIAL"
Values: "INITIAL" "INITIAL_ONLY" "NO_DATA" "WHEN_NEEDED"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.mongodb.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.debezium.mongodb.Trigger instead.

Examples

Sharded connection

id: debezium-mongodb
namespace: company.team

tasks:
  - id: send_data
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.data }}"

triggers:
  - id: realtime
    type: io.kestra.plugin.debezium.mongodb.RealtimeTrigger
    snapshotMode: INITIAL
    connectionString: mongodb://mongo_user:[email protected]:27017,mongos1.example.com:27017/

Replica set connection

id: debezium-mongodb
namespace: company.team

tasks:
  - id: send_data
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.data }}"

triggers:
  - id: realtime
    type: io.kestra.plugin.debezium.mongodb.RealtimeTrigger
    snapshotMode: INITIAL
    connectionString: mongodb://mongo_user:[email protected]:27017/?replicaSet=rs0

connectionString string required
hostname string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.mongodb.RealtimeTrigger" required
Constant: "io.kestra.plugin.debezium.mongodb.RealtimeTrigger"
conditions array
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedCollections

A list of regular expressions that match the collection namespaces (for example, databaseName.collectionName) of all collections to be excluded.

excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedCollections

A list of regular expressions that match the collection namespaces (for example, databaseName.collectionName) of all collections to be monitored.

includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
offsetsCommitMode string

Possible values are:

  • ON_EACH_BATCH: after each batch of records consumed by this trigger, the offsets will be stored in the KV Store. This avoids any duplicated records being consumed but can be costly if many events are produced.
  • ON_STOP: when this trigger is stopped or killed, the offsets will be stored in the KV Store. This avoids any unnecessary writes to the KV Store, but if the trigger is not stopped gracefully, the KV Store value may not be updated, leading to duplicated records consumption.

Default value is : ON_EACH_BATCH

Default: "ON_EACH_BATCH"
Values: "ON_EACH_BATCH" "ON_STOP"
password string
properties object

Any additional configuration properties that are valid for the current driver.

snapshotMode string

Possible settings are:

  • INITIAL: The connector runs a snapshot only when no offsets have been recorded for the logical server name.
  • INITIAL_ONLY: The connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.
  • NO_DATA: The connector captures the structure of all relevant tables, performing all the steps described in the default snapshot workflow, except that it does not create READ events to represent the data set at the point of the connector’s start-up.
  • WHEN_NEEDED: The connector runs a snapshot upon startup whenever it deems it necessary. That is, when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server.

Default value is : INITIAL

Default: "INITIAL"
Values: "INITIAL" "INITIAL_ONLY" "NO_DATA" "WHEN_NEEDED"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.mongodb.Trigger object

If you would like to consume each message from change data capture in real-time and create one execution per message, you can use the io.kestra.plugin.debezium.mongodb.RealtimeTrigger instead.

Examples

Sharded connection

id: debezium-mongodb
namespace: company.team

tasks:
  - id: send_data
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.data }}"

triggers:
  - id: trigger
    type: io.kestra.plugin.debezium.mongodb.Trigger
    snapshotMode: INITIAL
    connectionString: mongodb://mongo_user:[email protected]:27017,mongos1.example.com:27017/

Replica set connection

id: debezium-mongodb
namespace: company.team

tasks:
  - id: send_data
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.data }}"

triggers:
  - id: trigger
    type: io.kestra.plugin.debezium.mongodb.Trigger
    snapshotMode: INITIAL
    connectionString: mongodb://mongo_user:[email protected]:27017/?replicaSet=rs0

connectionString string required
hostname string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.mongodb.Trigger" required
Constant: "io.kestra.plugin.debezium.mongodb.Trigger"
conditions array
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedCollections

A list of regular expressions that match the collection namespaces (for example, databaseName.collectionName) of all collections to be excluded.

excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedCollections

A list of regular expressions that match the collection namespaces (for example, databaseName.collectionName) of all collections to be monitored.

includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string
format=duration
maxRecords integer
maxWait string

Default value is : 10.000000000

Default: 10.0
format=duration
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
password string
properties object

Any additional configuration properties that are valid for the current driver.

snapshotMode string

Possible settings are:

  • INITIAL: The connector runs a snapshot only when no offsets have been recorded for the logical server name.
  • INITIAL_ONLY: The connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.
  • NO_DATA: The connector captures the structure of all relevant tables, performing all the steps described in the default snapshot workflow, except that it does not create READ events to represent the data set at the point of the connector’s start-up.
  • WHEN_NEEDED: The connector runs a snapshot upon startup whenever it deems it necessary. That is, when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server.

Default value is : INITIAL

Default: "INITIAL"
Values: "INITIAL" "INITIAL_ONLY" "NO_DATA" "WHEN_NEEDED"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.mysql.Capture object
Examples
snapshotMode: NEVER
hostname: 127.0.0.1
port: "3306"
username: mysql_user
password: mysql_passwd
maxRecords: 100
hostname string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
serverId string required

This must be unique across all currently-running database processes in the MySQL cluster. This connector joins the MySQL database cluster as another server (with this unique ID) so it can read the binlog. By default, a random number between 5400 and 6400 is generated, though the recommendation is to explicitly set a value.

type const: "io.kestra.plugin.debezium.mysql.Capture" required
Constant: "io.kestra.plugin.debezium.mysql.Capture"
allowFailure boolean

Default value is : false

Default: false
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

maxSnapshotDuration string

It's not a hard limit and is evaluated every second. The properties 'maxRecord', 'maxDuration' and 'maxWait' are evaluated only after the snapshot is done.

Default value is : 3600.000000000

Default: 3600.0
format=duration
maxWait string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

Default value is : 10.000000000

Default: 10.0
format=duration
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
password string
properties object

Any additional configuration properties that are valid for the current driver.

snapshotMode string

Possible settings are:

  • INITIAL: The connector runs a snapshot only when no offsets have been recorded for the logical server name.
  • INITIAL_ONLY: The connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.
  • WHEN_NEEDED: The connector runs a snapshot upon startup whenever it deems it necessary. That is, when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server.
  • NEVER: The connector never uses snapshots. Upon first startup with a logical server name, the connector reads from the beginning of the binlog. Configure this behavior with care. It is valid only when the binlog is guaranteed to contain the entire history of the database.
  • SCHEMA_ONLY: The connector runs a snapshot of the schemas and not the data. This setting is useful when you do not need the topics to contain a consistent snapshot of the data but need them to have only the changes since the connector was started.
  • SCHEMA_ONLY_RECOVERY: This is a recovery setting for a connector that has already been capturing changes. When you restart the connector, this setting enables recovery of a corrupted or lost database history topic. You might set it periodically to "clean up" a database history topic that has been growing unexpectedly. Database history topics require infinite retention.

Default value is : INITIAL

Default: "INITIAL"
Values: "INITIAL" "INITIAL_ONLY" "WHEN_NEEDED" "NEVER" "SCHEMA_ONLY" "SCHEMA_ONLY_RECOVERY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.mysql.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.debezium.mysql.Trigger instead.

##### Examples

Consume a message from a MySQL database via change data capture in real-time.

id: debezium-mysql
namespace: company.team

tasks:
  - id: send_data
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.data }}"

triggers:
  - id: realtime
    type: io.kestra.plugin.debezium.mysql.RealtimeTrigger
    serverId: 123456789
    hostname: 127.0.0.1
    port: 63306
    username: mysql_user
    password: mysql_passwd
hostname string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.mysql.RealtimeTrigger" required
Constant: "io.kestra.plugin.debezium.mysql.RealtimeTrigger"
conditions array
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
offsetsCommitMode string

Possible values are:

  • ON_EACH_BATCH: after each batch of records consumed by this trigger, the offsets will be stored in the KV Store. This avoids any duplicated records being consumed but can be costly if many events are produced.
  • ON_STOP: when this trigger is stopped or killed, the offsets will be stored in the KV Store. This avoids any unnecessary writes to the KV Store, but if the trigger is not stopped gracefully, the KV Store value may not be updated, leading to duplicated records consumption.

Default value is : ON_EACH_BATCH

Default: "ON_EACH_BATCH"
Values: "ON_EACH_BATCH" "ON_STOP"
password string
properties object

Any additional configuration properties that are valid for the current driver.

serverId string

This must be unique across all currently-running database processes in the MySQL cluster. This connector joins the MySQL database cluster as another server (with this unique ID) so it can read the binlog. By default, a random number between 5400 and 6400 is generated, though the recommendation is to explicitly set a value.

snapshotMode string

Possible settings are:

  • INITIAL: The connector runs a snapshot only when no offsets have been recorded for the logical server name.
  • INITIAL_ONLY: The connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.
  • WHEN_NEEDED: The connector runs a snapshot upon startup whenever it deems it necessary. That is, when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server.
  • NEVER: The connector never uses snapshots. Upon first startup with a logical server name, the connector reads from the beginning of the binlog. Configure this behavior with care. It is valid only when the binlog is guaranteed to contain the entire history of the database.
  • SCHEMA_ONLY: The connector runs a snapshot of the schemas and not the data. This setting is useful when you do not need the topics to contain a consistent snapshot of the data but need them to have only the changes since the connector was started.
  • SCHEMA_ONLY_RECOVERY: This is a recovery setting for a connector that has already been capturing changes. When you restart the connector, this setting enables recovery of a corrupted or lost database history topic. You might set it periodically to "clean up" a database history topic that has been growing unexpectedly. Database history topics require infinite retention.

Default value is : INITIAL

Default: "INITIAL"
Values: "INITIAL" "INITIAL_ONLY" "WHEN_NEEDED" "NEVER" "SCHEMA_ONLY" "SCHEMA_ONLY_RECOVERY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.mysql.Trigger object

If you would like to consume each message from change data capture in real-time and create one execution per message, you can use the io.kestra.plugin.debezium.mysql.RealtimeTrigger instead.

##### Examples

snapshotMode: NEVER
hostname: 127.0.0.1
port: "3306"
username: mysql_user
password: mysql_passwd
maxRecords: 100
hostname string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.mysql.Trigger" required
Constant: "io.kestra.plugin.debezium.mysql.Trigger"
conditions array
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

maxSnapshotDuration string

It's not a hard limit and is evaluated every second. The properties 'maxRecord', 'maxDuration' and 'maxWait' are evaluated only after the snapshot is done.

Default value is : 3600.000000000

Default: 3600.0
format=duration
maxWait string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

Default value is : 10.000000000

Default: 10.0
format=duration
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
password string
properties object

Any additional configuration properties that are valid for the current driver.

serverId string

This must be unique across all currently-running database processes in the MySQL cluster. This connector joins the MySQL database cluster as another server (with this unique ID) so it can read the binlog. By default, a random number between 5400 and 6400 is generated, though the recommendation is to explicitly set a value.

snapshotMode string

Possible settings are:

  • INITIAL: The connector runs a snapshot only when no offsets have been recorded for the logical server name.
  • INITIAL_ONLY: The connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.
  • WHEN_NEEDED: The connector runs a snapshot upon startup whenever it deems it necessary. That is, when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server.
  • NEVER: The connector never uses snapshots. Upon first startup with a logical server name, the connector reads from the beginning of the binlog. Configure this behavior with care. It is valid only when the binlog is guaranteed to contain the entire history of the database.
  • SCHEMA_ONLY: The connector runs a snapshot of the schemas and not the data. This setting is useful when you do not need the topics to contain a consistent snapshot of the data but need them to have only the changes since the connector was started.
  • SCHEMA_ONLY_RECOVERY: This is a recovery setting for a connector that has already been capturing changes. When you restart the connector, this setting enables recovery of a corrupted or lost database history topic. You might set it periodically to "clean up" a database history topic that has been growing unexpectedly. Database history topics require infinite retention.

Default value is : INITIAL

Default: "INITIAL"
Values: "INITIAL" "INITIAL_ONLY" "WHEN_NEEDED" "NEVER" "SCHEMA_ONLY" "SCHEMA_ONLY_RECOVERY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.oracle.Capture object
Examples

Non-container database (non-CDB)

snapshotMode: INITIAL
hostname: 127.0.0.1
port: "1521"
username: c##dbzuser
password: dbz
sid: ORCLCDB
maxRecords: 100

Container database (CDB)

snapshotMode: INITIAL
hostname: 127.0.0.1
port: "1521"
username: c##dbzuser
password: dbz
sid: ORCLCDB
pluggableDatabase: ORCLPDB1
maxRecords: 100
hostname string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
sid string required
type const: "io.kestra.plugin.debezium.oracle.Capture" required
Constant: "io.kestra.plugin.debezium.oracle.Capture"
allowFailure boolean

Default value is : false

Default: false
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

maxSnapshotDuration string

It's not a hard limit and is evaluated every second. The properties 'maxRecord', 'maxDuration' and 'maxWait' are evaluated only after the snapshot is done.

Default value is : 3600.000000000

Default: 3600.0
format=duration
maxWait string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

Default value is : 10.000000000

Default: 10.0
format=duration
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
password string
pluggableDatabase string

For non-container database (non-CDB) installation, do not specify the pluggableDatabase property.

properties object

Any additional configuration properties that are valid for the current driver.

snapshotMode string

Possible settings are:

  • ALWAYS: The connector runs a snapshot on each connector start.
  • INITIAL: The connector runs a snapshot only when no offsets have been recorded for the logical server name.
  • INITIAL_ONLY: The connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.
  • WHEN_NEEDED: The connector runs a snapshot upon startup whenever it deems it necessary. That is, when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server.
  • NO_DATA: The connector runs a snapshot of the schemas and not the data. This setting is useful when you do not need the topics to contain a consistent snapshot of the data but need them to have only the changes since the connector was started.
  • RECOVERY: This is a recovery setting for a connector that has already been capturing changes. When you restart the connector, this setting enables recovery of a corrupted or lost database history topic. You might set it periodically to "clean up" a database history topic that has been growing unexpectedly. Database history topics require infinite retention.

Default value is : INITIAL

Default: "INITIAL"
Values: "ALWAYS" "INITIAL" "INITIAL_ONLY" "WHEN_NEEDED" "NO_DATA" "RECOVERY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.oracle.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.debezium.oracle.Trigger instead.

##### Examples

Consume a message from an Oracle database via change data capture in real-time.

id: debezium-oracle
namespace: company.team

tasks:
  - id: send_data
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.data }}"

triggers:
  - id: realtime
    type: io.kestra.plugin.debezium.oracle.RealtimeTrigger
    hostname: 127.0.0.1
    port: 1521
    username: c##dbzuser
    password: dbz
    sid: ORCLCDB

hostname string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
sid string required
type const: "io.kestra.plugin.debezium.oracle.RealtimeTrigger" required
Constant: "io.kestra.plugin.debezium.oracle.RealtimeTrigger"
conditions array
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
offsetsCommitMode string

Possible values are:

  • ON_EACH_BATCH: after each batch of records consumed by this trigger, the offsets will be stored in the KV Store. This avoids any duplicated records being consumed but can be costly if many events are produced.
  • ON_STOP: when this trigger is stopped or killed, the offsets will be stored in the KV Store. This avoids any unnecessary writes to the KV Store, but if the trigger is not stopped gracefully, the KV Store value may not be updated, leading to duplicated records consumption.

Default value is : ON_EACH_BATCH

Default: "ON_EACH_BATCH"
Values: "ON_EACH_BATCH" "ON_STOP"
password string
pluggableDatabase string

For non-container database (non-CDB) installation, do not specify the pluggableDatabase property.

properties object

Any additional configuration properties that are valid for the current driver.

snapshotMode string

Possible settings are:

  • ALWAYS: The connector runs a snapshot on each connector start.
  • INITIAL: The connector runs a snapshot only when no offsets have been recorded for the logical server name.
  • INITIAL_ONLY: The connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.
  • WHEN_NEEDED: The connector runs a snapshot upon startup whenever it deems it necessary. That is, when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server.
  • NO_DATA: The connector runs a snapshot of the schemas and not the data. This setting is useful when you do not need the topics to contain a consistent snapshot of the data but need them to have only the changes since the connector was started.
  • RECOVERY: This is a recovery setting for a connector that has already been capturing changes. When you restart the connector, this setting enables recovery of a corrupted or lost database history topic. You might set it periodically to "clean up" a database history topic that has been growing unexpectedly. Database history topics require infinite retention.

Default value is : INITIAL

Default: "INITIAL"
Values: "ALWAYS" "INITIAL" "INITIAL_ONLY" "WHEN_NEEDED" "NO_DATA" "RECOVERY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.oracle.Trigger object

If you would like to consume each message from change data capture in real-time and create one execution per message, you can use the io.kestra.plugin.debezium.oracle.RealtimeTrigger instead.

##### Examples

snapshotMode: INITIAL_ONLY
hostname: 127.0.0.1
port: "1521"
username: c##dbzuser
password: dbz
sid: ORCLCDB
maxRecords: 100
hostname string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
sid string required
type const: "io.kestra.plugin.debezium.oracle.Trigger" required
Constant: "io.kestra.plugin.debezium.oracle.Trigger"
conditions array
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

interval string

The interval between two consecutive polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval should be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

maxSnapshotDuration string

It's not a hard limit and is evaluated every second. The properties 'maxRecords', 'maxDuration' and 'maxWait' are evaluated only after the snapshot is done.

Default value is : 3600.000000000

Default: 3600.0
format=duration
maxWait string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

Default value is : 10.000000000

Default: 10.0
format=duration
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
password string
pluggableDatabase string

For non-container database (non-CDB) installation, do not specify the pluggableDatabase property.

properties object

Any additional configuration properties that are valid for the current driver.

snapshotMode string

Possible settings are:

  • ALWAYS: The connector runs a snapshot on each connector start.
  • INITIAL: The connector runs a snapshot only when no offsets have been recorded for the logical server name.
  • INITIAL_ONLY: The connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.
  • WHEN_NEEDED: The connector runs a snapshot upon startup whenever it deems it necessary. That is, when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server.
  • NO_DATA: The connector runs a snapshot of the schemas and not the data. This setting is useful when you do not need the topics to contain a consistent snapshot of the data but need them to have only the changes since the connector was started.
  • RECOVERY: This is a recovery setting for a connector that has already been capturing changes. When you restart the connector, this setting enables recovery of a corrupted or lost database history topic. You might set it periodically to "clean up" a database history topic that has been growing unexpectedly. Database history topics require infinite retention.

Default value is : INITIAL

Default: "INITIAL"
Values: "ALWAYS" "INITIAL" "INITIAL_ONLY" "WHEN_NEEDED" "NO_DATA" "RECOVERY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.postgres.Capture object
Examples
hostname: 127.0.0.1
port: "5432"
username: psql_user
password: psql_passwd
maxRecords: 100
database: my_database
pluginName: PGOUTPUT
snapshotMode: ALWAYS
database string required
hostname string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.postgres.Capture" required
Constant: "io.kestra.plugin.debezium.postgres.Capture"
allowFailure boolean

Default value is : false

Default: false
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

maxSnapshotDuration string

It's not a hard limit and is evaluated every second. The properties 'maxRecords', 'maxDuration' and 'maxWait' are evaluated only after the snapshot is done.

Default value is : 3600.000000000

Default: 3600.0
format=duration
maxWait string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

Default value is : 10.000000000

Default: 10.0
format=duration
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
password string
pluginName string

If you are using a wal2json plug-in and transactions are very large, the JSON batch event that contains all transaction changes might not fit into the hard-coded memory buffer, which has a size of 1 GB. In such cases, switch to a streaming plug-in, by setting the plugin-name property to wal2json_streaming or wal2json_rds_streaming. With a streaming plug-in, PostgreSQL sends the connector a separate message for each change in a transaction.

Default value is : PGOUTPUT

Default: "PGOUTPUT"
Values: "DECODERBUFS" "WAL2JSON" "WAL2JSON_RDS" "WAL2JSON_STREAMING" "WAL2JSON_RDS_STREAMING" "PGOUTPUT"
properties object

Any additional configuration properties that are valid for the current driver.

publicationName string

This publication is created at start-up if it does not already exist and it includes all tables. Debezium then applies its own include/exclude list filtering, if configured, to limit the publication to change events for the specific tables of interest. The connector user must have superuser permissions to create this publication, so it is usually preferable to create the publication before starting the connector for the first time.

If the publication already exists, either for all tables or configured with a subset of tables, Debezium uses the publication as it is defined.

Default value is : kestra_publication

Default: "kestra_publication"
slotName string

The server uses this slot to stream events to the Debezium connector that you are configuring. Slot names must conform to PostgreSQL replication slot naming rules, which state: "Each replication slot has a name, which can contain lower-case letters, numbers, and the underscore character."

Default value is : kestra

Default: "kestra"
snapshotMode string

Possible settings are:

  • INITIAL: The connector performs a snapshot only when no offsets have been recorded for the logical server name.
  • ALWAYS: The connector performs a snapshot each time the connector starts.
  • NEVER: The connector never performs snapshots. When a connector is configured this way, its behavior when it starts is as follows. If there is a previously stored LSN, the connector continues streaming changes from that position. If no LSN has been stored, the connector starts streaming changes from the point in time when the PostgreSQL logical replication slot was created on the server. The never snapshot mode is useful only when you know all data of interest is still reflected in the WAL.
  • INITIAL_ONLY: The connector performs an initial snapshot and then stops, without processing any subsequent changes.

Default value is : INITIAL

Default: "INITIAL"
Values: "INITIAL" "ALWAYS" "NEVER" "INITIAL_ONLY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
sslCert string

Must be a PEM encoded certificate.

sslKey string

Must be a PEM encoded key.

sslKeyPassword string
sslMode string

Default value is : DISABLE

Default: "DISABLE"
Values: "DISABLE" "REQUIRE" "VERIFY_CA" "VERIFY_FULL"
sslRootCert string

Must be a PEM encoded certificate.

stateName string

Default value is : debezium-state

Default: "debezium-state"
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.postgres.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.debezium.postgres.Trigger instead.

Examples

Consume a message from a PostgreSQL database via change data capture in real-time.

id: debezium-postgres
namespace: company.team

tasks:
  - id: send_data
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.data }}"

triggers:
  - id: realtime
    type: io.kestra.plugin.debezium.postgres.RealtimeTrigger
    database: postgres
    hostname: 127.0.0.1
    port: 65432
    username: postgres
    password: pg_passwd
database string required
hostname string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.postgres.RealtimeTrigger" required
Constant: "io.kestra.plugin.debezium.postgres.RealtimeTrigger"
conditions array
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
offsetsCommitMode string

Possible values are:

  • ON_EACH_BATCH: after each batch of records consumed by this trigger, the offsets will be stored in the KV Store. This avoids any duplicated records being consumed but can be costly if many events are produced.
  • ON_STOP: when this trigger is stopped or killed, the offsets will be stored in the KV Store. This avoids any unnecessary writes to the KV Store, but if the trigger is not stopped gracefully, the KV Store value may not be updated, leading to duplicated records being consumed.

Default value is : ON_EACH_BATCH

Default: "ON_EACH_BATCH"
Values: "ON_EACH_BATCH" "ON_STOP"
password string
pluginName string

If you are using a wal2json plug-in and transactions are very large, the JSON batch event that contains all transaction changes might not fit into the hard-coded memory buffer, which has a size of 1 GB. In such cases, switch to a streaming plug-in, by setting the plugin-name property to wal2json_streaming or wal2json_rds_streaming. With a streaming plug-in, PostgreSQL sends the connector a separate message for each change in a transaction.

Default value is : PGOUTPUT

Default: "PGOUTPUT"
Values: "DECODERBUFS" "WAL2JSON" "WAL2JSON_RDS" "WAL2JSON_STREAMING" "WAL2JSON_RDS_STREAMING" "PGOUTPUT"
properties object

Any additional configuration properties that are valid for the current driver.

publicationName string

This publication is created at start-up if it does not already exist and it includes all tables. Debezium then applies its own include/exclude list filtering, if configured, to limit the publication to change events for the specific tables of interest. The connector user must have superuser permissions to create this publication, so it is usually preferable to create the publication before starting the connector for the first time.

If the publication already exists, either for all tables or configured with a subset of tables, Debezium uses the publication as it is defined.

Default value is : kestra_publication

Default: "kestra_publication"
slotName string

The server uses this slot to stream events to the Debezium connector that you are configuring. Slot names must conform to PostgreSQL replication slot naming rules, which state: "Each replication slot has a name, which can contain lower-case letters, numbers, and the underscore character."

Default value is : kestra

Default: "kestra"
snapshotMode string

Possible settings are:

  • INITIAL: The connector performs a snapshot only when no offsets have been recorded for the logical server name.
  • ALWAYS: The connector performs a snapshot each time the connector starts.
  • NEVER: The connector never performs snapshots. When a connector is configured this way, its behavior when it starts is as follows. If there is a previously stored LSN, the connector continues streaming changes from that position. If no LSN has been stored, the connector starts streaming changes from the point in time when the PostgreSQL logical replication slot was created on the server. The never snapshot mode is useful only when you know all data of interest is still reflected in the WAL.
  • INITIAL_ONLY: The connector performs an initial snapshot and then stops, without processing any subsequent changes.

Default value is : INITIAL

Default: "INITIAL"
Values: "INITIAL" "ALWAYS" "NEVER" "INITIAL_ONLY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
sslCert string

Must be a PEM encoded certificate.

sslKey string

Must be a PEM encoded key.

sslKeyPassword string
sslMode string

Default value is : DISABLE

Default: "DISABLE"
Values: "DISABLE" "REQUIRE" "VERIFY_CA" "VERIFY_FULL"
sslRootCert string

Must be a PEM encoded certificate.

stateName string

Default value is : debezium-state

Default: "debezium-state"
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.postgres.Trigger object

If you would like to consume each message from change data capture in real-time and create one execution per message, you can use the io.kestra.plugin.debezium.postgres.RealtimeTrigger instead.

Examples

hostname: 127.0.0.1
port: "5432"
username: postgres
password: psql_passwd
maxRecords: 100
database: my_database
pluginName: PGOUTPUT
snapshotMode: ALWAYS
database string required
hostname string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.postgres.Trigger" required
Constant: "io.kestra.plugin.debezium.postgres.Trigger"
conditions array
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

interval string

The interval between two consecutive polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval should be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

maxSnapshotDuration string

It's not a hard limit and is evaluated every second. The properties 'maxRecords', 'maxDuration' and 'maxWait' are evaluated only after the snapshot is done.

Default value is : 3600.000000000

Default: 3600.0
format=duration
maxWait string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

Default value is : 10.000000000

Default: 10.0
format=duration
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
password string
pluginName string

If you are using a wal2json plug-in and transactions are very large, the JSON batch event that contains all transaction changes might not fit into the hard-coded memory buffer, which has a size of 1 GB. In such cases, switch to a streaming plug-in, by setting the plugin-name property to wal2json_streaming or wal2json_rds_streaming. With a streaming plug-in, PostgreSQL sends the connector a separate message for each change in a transaction.

Default value is : PGOUTPUT

Default: "PGOUTPUT"
Values: "DECODERBUFS" "WAL2JSON" "WAL2JSON_RDS" "WAL2JSON_STREAMING" "WAL2JSON_RDS_STREAMING" "PGOUTPUT"
properties object

Any additional configuration properties that are valid for the current driver.

publicationName string

This publication is created at start-up if it does not already exist and it includes all tables. Debezium then applies its own include/exclude list filtering, if configured, to limit the publication to change events for the specific tables of interest. The connector user must have superuser permissions to create this publication, so it is usually preferable to create the publication before starting the connector for the first time.

If the publication already exists, either for all tables or configured with a subset of tables, Debezium uses the publication as it is defined.

Default value is : kestra_publication

Default: "kestra_publication"
slotName string

The server uses this slot to stream events to the Debezium connector that you are configuring. Slot names must conform to PostgreSQL replication slot naming rules, which state: "Each replication slot has a name, which can contain lower-case letters, numbers, and the underscore character."

Default value is : kestra

Default: "kestra"
snapshotMode string

Possible settings are:

  • INITIAL: The connector performs a snapshot only when no offsets have been recorded for the logical server name.
  • ALWAYS: The connector performs a snapshot each time the connector starts.
  • NEVER: The connector never performs snapshots. When a connector is configured this way, its behavior when it starts is as follows. If there is a previously stored LSN, the connector continues streaming changes from that position. If no LSN has been stored, the connector starts streaming changes from the point in time when the PostgreSQL logical replication slot was created on the server. The never snapshot mode is useful only when you know all data of interest is still reflected in the WAL.
  • INITIAL_ONLY: The connector performs an initial snapshot and then stops, without processing any subsequent changes.

Default value is : INITIAL

Default: "INITIAL"
Values: "INITIAL" "ALWAYS" "NEVER" "INITIAL_ONLY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
sslCert string

Must be a PEM encoded certificate.

sslKey string

Must be a PEM encoded key.

sslKeyPassword string
sslMode string

Default value is : DISABLE

Default: "DISABLE"
Values: "DISABLE" "REQUIRE" "VERIFY_CA" "VERIFY_FULL"
sslRootCert string

Must be a PEM encoded certificate.

stateName string

Default value is : debezium-state

Default: "debezium-state"
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.sqlserver.Capture object
Examples
snapshotMode: INITIAL
hostname: 127.0.0.1
port: "1433"
username: sqlserver_user
password: sqlserver_passwd
maxRecords: 100
database string required
hostname string required
id string required
minLength=1, pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.sqlserver.Capture" required
Constant: "io.kestra.plugin.debezium.sqlserver.Capture"
allowFailure boolean

Default value is : false

Default: false
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

maxSnapshotDuration string

It's not a hard limit and is evaluated every second. The properties 'maxRecords', 'maxDuration' and 'maxWait' are evaluated only after the snapshot is done.

Default value is : 3600.000000000

Default: 3600.0
format=duration
maxWait string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

Default value is : 10.000000000

Default: 10.0
format=duration
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
password string
properties object

Any additional configuration properties that are valid for the current driver.

snapshotMode string

Possible settings are:

  • INITIAL: Takes a snapshot of structure and data of captured tables; useful if topics should be populated with a complete representation of the data from the captured tables.
  • INITIAL_ONLY: Takes a snapshot of structure and data like initial but instead does not transition into streaming changes once the snapshot has completed.
  • SCHEMA_ONLY: Takes a snapshot of the structure of captured tables only; useful if only changes happening from now onwards should be propagated to topics.

Default value is : INITIAL

Default: "INITIAL"
Values: "INITIAL" "INITIAL_ONLY" "SCHEMA_ONLY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.sqlserver.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.debezium.sqlserver.Trigger instead.

##### Examples

Consume a message from a SQL Server database via change data capture in real-time.

id: debezium-sqlserver
namespace: company.team

tasks:
  - id: send_data
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.data }}"

triggers:
  - id: realtime
    type: io.kestra.plugin.debezium.sqlserver.RealtimeTrigger
    hostname: 127.0.0.1
    port: 61433
    username: sa
    password: password
    database: deb
database string required
hostname string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.sqlserver.RealtimeTrigger" required
Constant: "io.kestra.plugin.debezium.sqlserver.RealtimeTrigger"
conditions array
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
offsetsCommitMode string

Possible values are:

  • ON_EACH_BATCH: after each batch of records consumed by this trigger, the offsets will be stored in the KV Store. This avoids any duplicated records being consumed but can be costly if many events are produced.
  • ON_STOP: when this trigger is stopped or killed, the offsets will be stored in the KV Store. This avoids any unnecessary writes to the KV Store, but if the trigger is not stopped gracefully, the KV Store value may not be updated, leading to duplicated record consumption.

Default value is : ON_EACH_BATCH

Default: "ON_EACH_BATCH"
Values: "ON_EACH_BATCH" "ON_STOP"
password string
properties object

Any additional configuration properties that are valid for the current driver.

serverId string
snapshotMode string

Possible settings are:

  • INITIAL: Takes a snapshot of structure and data of captured tables; useful if topics should be populated with a complete representation of the data from the captured tables.
  • INITIAL_ONLY: Takes a snapshot of structure and data like initial but instead does not transition into streaming changes once the snapshot has completed.
  • SCHEMA_ONLY: Takes a snapshot of the structure of captured tables only; useful if only changes happening from now onwards should be propagated to topics.

Default value is : INITIAL

Default: "INITIAL"
Values: "INITIAL" "INITIAL_ONLY" "SCHEMA_ONLY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.debezium.sqlserver.Trigger object

If you would like to consume each message from change data capture in real-time and create one execution per message, you can use the io.kestra.plugin.debezium.sqlserver.RealtimeTrigger instead.

##### Examples

snapshotMode: INITIAL
hostname: 127.0.0.1
port: "1433"
username: sqlserver_user
password: sqlserver_passwd
database: deb
maxRecords: 100
database string required
hostname string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port string required
type const: "io.kestra.plugin.debezium.sqlserver.Trigger" required
Constant: "io.kestra.plugin.debezium.sqlserver.Trigger"
conditions array
deleted string

Possible settings are:

  • ADD_FIELD: Add a deleted field as boolean.
  • NULL: Send a row with all values as null.
  • DROP: Don't send deleted row.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "NULL" "DROP"
deletedFieldName string

Default value is : deleted

Default: "deleted"
description string
disabled boolean

Default value is : false

Default: false
excludedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the includedColumns connector configuration property.

excludedDatabases

The connector captures changes in any database whose name is not in the excludedDatabases. Do not also set the includedDatabases connector configuration property.

excludedTables

The connector captures changes in any table not included in excludedTables. Each identifier is of the form databaseName.tableName. Do not also specify the includedTables connector configuration property.

format string

Possible settings are:

  • RAW: Send raw data from debezium.
  • INLINE: Send a row like in the source with only data (remove after & before), all the columns will be present for each row.
  • WRAP: Send a row like INLINE but wrapped in a record field.

Default value is : INLINE

Default: "INLINE"
Values: "RAW" "INLINE" "WRAP"
ignoreDdl boolean

Ignore CREATE, ALTER, DROP and TRUNCATE operations.

Default value is : true

Default: true
includedColumns

Fully-qualified names for columns are of the form databaseName.tableName.columnName. Do not also specify the excludedColumns connector configuration property.

includedDatabases

The connector does not capture changes in any database whose name is not in includedDatabases. By default, the connector captures changes in all databases. Do not also set the excludedDatabases connector configuration property.

includedTables

The connector does not capture changes in any table not included in includedTables. Each identifier is of the form databaseName.tableName. By default, the connector captures changes in every non-system table in each database whose changes are being captured. Do not also specify the excludedTables connector configuration property.

interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
key string

Possible settings are:

  • ADD_FIELD: Add key(s) merged with columns.
  • DROP: Drop keys.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

maxSnapshotDuration string

It's not a hard limit and is evaluated every second. The properties 'maxRecords', 'maxDuration' and 'maxWait' are evaluated only after the snapshot is done.

Default value is : 3600.000000000

Default: 3600.0
format=duration
maxWait string

It's not a hard limit and is evaluated every second. It is taken into account after the snapshot, if any.

Default value is : 10.000000000

Default: 10.0
format=duration
metadata string

Possible settings are:

  • ADD_FIELD: Add metadata in a column named metadata.
  • DROP: Drop metadata.

Default value is : ADD_FIELD

Default: "ADD_FIELD"
Values: "ADD_FIELD" "DROP"
metadataFieldName string

Default value is : metadata

Default: "metadata"
password string
properties object

Any additional configuration properties that are valid for the current driver.

serverId string
snapshotMode string

Possible settings are:

  • INITIAL: Takes a snapshot of structure and data of captured tables; useful if topics should be populated with a complete representation of the data from the captured tables.
  • INITIAL_ONLY: Takes a snapshot of structure and data like initial but instead does not transition into streaming changes once the snapshot has completed.
  • SCHEMA_ONLY: Takes a snapshot of the structure of captured tables only; useful if only changes happening from now onwards should be propagated to topics.

Default value is : INITIAL

Default: "INITIAL"
Values: "INITIAL" "INITIAL_ONLY" "SCHEMA_ONLY"
splitTable string

Possible settings are:

  • TABLE: This will split all rows by tables on output with name database.table
  • DATABASE: This will split all rows by databases on output with name database.
  • OFF: This will NOT split all rows resulting in a single data output.

Default value is : TABLE

Default: "TABLE"
Values: "OFF" "DATABASE" "TABLE"
stateName string

Default value is : debezium-state

Default: "debezium-state"
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.docker.Build object
Examples

Build and push a Docker image to a registry

id: docker_build
namespace: company.team

tasks:
  - id: build
    type: io.kestra.plugin.docker.Build
    dockerfile: |
      FROM ubuntu
      ARG APT_PACKAGES=""

      RUN apt-get update && apt-get install -y --no-install-recommends ${APT_PACKAGES};
    platforms:
      - linux/amd64
    tags:
      - private-registry.io/unit-test:latest
    buildArgs:
      APT_PACKAGES: curl
    labels:
      unit-test: "true"
    credentials:
      registry: <registry.url.com>
      username: <your-user>
      password: <your-password>

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
tags string[] required
type const: "io.kestra.plugin.docker.Build" required
Constant: "io.kestra.plugin.docker.Build"
allowFailure boolean

Default value is : false

Default: false
buildArgs Record<string, string>
credentials
All of: Credentials for a private container registry. object, Credentials to push your image to a container registry.
description string
disabled boolean

Default value is : false

Default: false
dockerfile string
host string
inputFiles object | string
labels Record<string, string>
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
platforms string[]
protocol string

Default value is : HTTPS

Default: "HTTPS"
Values: "HTTP" "HTTPS"
pull boolean

Default value is : true

Default: true
push boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.docker.Run object
Examples

Run the docker/whalesay container with the command 'cowsay hello'

id: docker_run
namespace: company.team

tasks:
  - id: run
    type: io.kestra.plugin.docker.Run
    containerImage: docker/whalesay
    commands:
      - cowsay
      - hello

Run the docker/whalesay container with no command

id: docker_run
namespace: company.team

tasks:
  - id: run
    type: io.kestra.plugin.docker.Run
    containerImage: docker/whalesay

containerImage string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.docker.Run" required
Constant: "io.kestra.plugin.docker.Run"
allowFailure boolean

Default value is : false

Default: false
commands string[]

Default value is : []

Default:
[]
config string | object

Docker configuration file that can set access credentials to private container registries. Usually located in ~/.docker/config.json.

cpu
All of: io.kestra.plugin.scripts.runner.docker.Cpu object, Limits the CPU usage to a given maximum threshold value.
credentials
All of: Credentials for a private container registry. object, Credentials for a private container registry.
description string
deviceRequests array
disabled boolean

Default value is : false

Default: false
entryPoint string[]
env Record<string, string>
extraHosts string[]
host string
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
memory
All of: io.kestra.plugin.scripts.runner.docker.Memory object, Limits memory usage to a given maximum threshold value.
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
networkMode string
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

pullPolicy
All of: The image pull policy for a container image and the tag of the image, which affect when Docker attempts to pull (download) the specified image. string, The pull policy for an image.
shmSize string

The size must be greater than 0. If omitted, the system uses 64MB.

timeout string
format=duration
user string
volumes string[]

Must be a valid mount expression as string, example : /home/user:/app.

Volume mounts are disabled by default for security reasons; you must enable them in the server configuration by setting kestra.tasks.scripts.docker.volume-enabled to true.

warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.elasticsearch.Bulk object
Examples
id: elasticsearch_bulk_load
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: bulk_load
    type: io.kestra.plugin.elasticsearch.Bulk
    connection:
      hosts:
       - "http://localhost:9200"
    from: "{{ inputs.file }}"

connection required
All of: io.kestra.plugin.elasticsearch.ElasticsearchConnection object, The connection properties.
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.elasticsearch.Bulk" required
Constant: "io.kestra.plugin.elasticsearch.Bulk"
allowFailure boolean

Default value is : false

Default: false
chunk integer

Default value is : 1000

Default: 1000
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
routing string

Using this value to hash the shard and not the id.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.elasticsearch.ElasticsearchConnection object
hosts string[] required

Must be a URI like https://elasticsearch.com:9200 with scheme and port.

minItems=1
basicAuth
All of: io.kestra.plugin.elasticsearch.ElasticsearchConnection-BasicAuth object, Basic auth configuration.
headers string[]

Must be a string with key value separated with :, ex: Authorization: Token XYZ.

pathPrefix string

For example, if this is set to /my/path, then any client request will become /my/path/ + endpoint. In essence, every request's endpoint is prefixed by this pathPrefix. The path prefix is useful for when ElasticSearch is behind a proxy that provides a base path or a proxy that requires all paths to start with '/'; it is not intended for other purposes and it should not be supplied in other scenarios.

strictDeprecationMode boolean
trustAllSsl boolean

Use this if the server is using a self signed SSL certificate.

io.kestra.plugin.elasticsearch.ElasticsearchConnection-BasicAuth object
password string
username string
io.kestra.plugin.elasticsearch.Get object
Examples
id: elasticsearch_get
namespace: company.team

tasks:
  - id: get
    type: io.kestra.plugin.elasticsearch.Get
    connection:
      hosts:
       - "http://localhost:9200"
    index: "my_index"
    key: "my_id"

connection required
All of: io.kestra.plugin.elasticsearch.ElasticsearchConnection object, The connection properties.
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
index string required
key string required
type const: "io.kestra.plugin.elasticsearch.Get" required
Constant: "io.kestra.plugin.elasticsearch.Get"
version integer required

which will cause the get operation to only be performed if a matching version exists and no changes happened on the doc since then.

allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
routing string

Using this value to hash the shard and not the id.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.elasticsearch.Load object
Examples
id: elasticsearch_load
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: load
    type: io.kestra.plugin.elasticsearch.Load
    connection:
      hosts:
       - "http://localhost:9200"
    from: "{{ inputs.file }}"
    index: "my_index"

connection required
All of: io.kestra.plugin.elasticsearch.ElasticsearchConnection object, The connection properties.
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
index string required
type const: "io.kestra.plugin.elasticsearch.Load" required
Constant: "io.kestra.plugin.elasticsearch.Load"
allowFailure boolean

Default value is : false

Default: false
chunk integer

Default value is : 1000

Default: 1000
description string
disabled boolean

Default value is : false

Default: false
idKey string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
opType string
Values: "INDEX" "CREATE" "UPDATE" "DELETE"
removeIdKey boolean

Default value is : true

Default: true
routing string

Using this value to hash the shard and not the id.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.elasticsearch.Put object
Examples

Put a document with a Map.

id: elasticsearch_put
namespace: company.team

tasks:
  - id: put
    type: io.kestra.plugin.elasticsearch.Put
    connection:
      hosts:
       - "http://localhost:9200"
    index: "my_index"
    key: "my_id"
    value:
      name: "John Doe"
      city: "Paris"

Put a document from a JSON string.

id: elasticsearch_put
namespace: company.team

inputs:
  - id: value
    type: JSON
    defaults: {"name": "John Doe", "city": "Paris"}

tasks:
  - id: put
    type: io.kestra.plugin.elasticsearch.Put
    connection:
      hosts:
       - "http://localhost:9200"
    index: "my_index"
    key: "my_id"
    value: "{{ inputs.value }}"

connection required
All of: io.kestra.plugin.elasticsearch.ElasticsearchConnection object, The connection properties.
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
index string required
type const: "io.kestra.plugin.elasticsearch.Put" required
Constant: "io.kestra.plugin.elasticsearch.Put"
allowFailure boolean

Default value is : false

Default: false
contentType string

Default value is : JSON

Default: "JSON"
Values: "CBOR" "JSON" "SMILE" "YAML"
description string
disabled boolean

Default value is : false

Default: false
key string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
opType string
Values: "INDEX" "CREATE" "UPDATE" "DELETE"
refreshPolicy string

An immediate refresh IMMEDIATE, wait for a refresh WAIT_UNTIL, or ignore refreshes entirely NONE.

Default value is : NONE

Default: "NONE"
Values: "IMMEDIATE" "WAIT_UNTIL" "NONE"
routing string

Using this value to hash the shard and not the id.

timeout string
format=duration
value

Can be a string (in this case, the contentType will be used) or a raw Map.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.elasticsearch.Request object
Examples

Inserting a document in an index using POST request.

id: elasticsearch_request
namespace: company.team

tasks:
  - id: request_post
    type: io.kestra.plugin.elasticsearch.Request
    connection:
      hosts:
       - "http://localhost:9200"
    method: "POST"
    endpoint: "my_index/_doc/john"
    body:
      name: "john"

Searching for documents using GET request.

id: elasticsearch_request
namespace: company.team

tasks:
  - id: request_get
    type: io.kestra.plugin.elasticsearch.Request
    connection:
      hosts:
       - "http://localhost:9200"
    method: "GET"
    endpoint: "my_index/_search"
    parameters:
      q: "name:"John Doe""

Deleting document using DELETE request.

id: elasticsearch_request
namespace: company.team

tasks:
  - id: request_delete
    type: io.kestra.plugin.elasticsearch.Request
    connection:
      hosts:
       - "http://localhost:9200"
    method: "DELETE"
    endpoint: "my_index/_doc/<_id>"

connection required
All of: io.kestra.plugin.elasticsearch.ElasticsearchConnection object, The connection properties.
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.elasticsearch.Request" required
Constant: "io.kestra.plugin.elasticsearch.Request"
allowFailure boolean

Default value is : false

Default: false
body

Can be a JSON string or raw Map that will be converted to json.

description string
disabled boolean

Default value is : false

Default: false
endpoint string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
method string

Default value is : GET

Default: "GET"
Values: "CONNECT" "CUSTOM" "DELETE" "GET" "HEAD" "OPTIONS" "PATCH" "POST" "PUT" "TRACE"
parameters Record<string, string>
routing string

Using this value to hash the shard and not the id.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.elasticsearch.Scroll object

Get all documents from a search request and store them as a Kestra Internal Storage file.

##### Examples

id: elasticsearch_scroll
namespace: company.team

tasks:
  - id: scroll
    type: io.kestra.plugin.elasticsearch.Scroll
    connection:
      hosts:
        - "http://localhost:9200"
    indexes:
      - "my_index"
    request:
      query:
        term:
          name:
            value: 'john'

connection required
All of: io.kestra.plugin.elasticsearch.ElasticsearchConnection object, The connection properties.
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.elasticsearch.Scroll" required
Constant: "io.kestra.plugin.elasticsearch.Scroll"
allowFailure boolean

Default value is : false

Default: false
contentType string

Default value is : JSON

Default: "JSON"
Values: "CBOR" "JSON" "SMILE" "YAML"
description string
disabled boolean

Default value is : false

Default: false
indexes string[]

Default to all indices.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
request

Can be a JSON string (in this case, the contentType will be used) or a raw Map.

routing string

Using this value to hash the shard and not the id.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.elasticsearch.Search object

Get all documents from a search request and store them as outputs.

##### Examples

id: elasticsearch_search
namespace: company.team

tasks:
  - id: search
    type: io.kestra.plugin.elasticsearch.Search
    connection:
      hosts:
        - "http://localhost:9200"
    indexes:
      - "my_index"
    request:
      query:
        term:
          name:
            value: 'john'

connection required
All of: io.kestra.plugin.elasticsearch.ElasticsearchConnection object, The connection properties.
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.elasticsearch.Search" required
Constant: "io.kestra.plugin.elasticsearch.Search"
allowFailure boolean

Default value is : false

Default: false
contentType string

Default value is : JSON

Default: "JSON"
Values: "CBOR" "JSON" "SMILE" "YAML"
description string
disabled boolean

Default value is : false

Default: false
fetchType string

FETCH_ONE output the first row, FETCH output all the rows, STORE store all rows in a file, NONE do nothing.

Default value is : FETCH

Default: "FETCH"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
indexes string[]

Defaults to all indices.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
request

Can be a JSON string (in which case the contentType will be used) or a raw Map.

routing string

Use this value to hash the shard instead of the id.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fivetran.connectors.Sync object
Examples
id: fivetran_sync
namespace: company.team

tasks:
  - id: sync
    type: io.kestra.plugin.fivetran.connectors.Sync
    apiKey: "api_key"
    apiSecret: "api_secret"
    connectorId: "connector_id"

apiKey string required
apiSecret string required
connectorId string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fivetran.connectors.Sync" required
Constant: "io.kestra.plugin.fivetran.connectors.Sync"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
force boolean

If force is true and the connector is currently syncing, it will stop the sync and re-run it. If force is false, the connector will sync only if it isn't currently syncing.

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

Default value is : 3600.000000000

Default: 3600.0
format=duration
timeout string
format=duration
wait boolean

Allows capturing the job status & logs.

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftp.Delete object
Examples
id: fs_ftp_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.fs.ftp.Delete
    host: localhost
    port: 21
    username: foo
    password: pass
    uri: "/upload/dir1/file.txt"

host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.ftp.Delete" required
Constant: "io.kestra.plugin.fs.ftp.Delete"
uri string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
errorOnMissing boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 21

Default: "21"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftp.Download object
Examples
id: fs_ftp_download
namespace: company.team

tasks:
  - id: download
    type: io.kestra.plugin.fs.ftp.Download
    host: localhost
    port: 21
    username: foo
    password: pass
    from: "/in/file.txt"

from string required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.ftp.Download" required
Constant: "io.kestra.plugin.fs.ftp.Download"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 21

Default: "21"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftp.Downloads object
Examples

Download a list of files and move them to an archive folder

id: fs_ftp_downloads
namespace: company.team

tasks:
  - id: downloads
    type: io.kestra.plugin.fs.ftp.Downloads
    host: localhost
    port: 21
    username: foo
    password: pass
    from: "/in/"
    interval: PT10S
    action: MOVE
    moveDirectory: "/archive/"

from string required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.ftp.Downloads" required
Constant: "io.kestra.plugin.fs.ftp.Downloads"
action string
Values: "MOVE" "DELETE" "NONE"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
moveDirectory string
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 21

Default: "21"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
recursive boolean

Default value is : false

Default: false
regExp string
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftp.List object
Examples
id: fs_ftp_list
namespace: company.team

tasks:
  - id: list
    type: io.kestra.plugin.fs.ftp.List
    host: localhost
    port: 21
    username: foo
    password: pass
    from: "/upload/dir1/"
    regExp: ".*\/dir1\/.*.(yaml|yml)"

from string required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.ftp.List" required
Constant: "io.kestra.plugin.fs.ftp.List"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 21

Default: "21"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
recursive boolean

Default value is : false

Default: false
regExp string
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftp.Move object

If the destination directory doesn't exist, it will be created.

##### Examples

id: fs_ftp_move
namespace: company.team

tasks:
  - id: move
    type: io.kestra.plugin.fs.ftp.Move
    host: localhost
    port: 21
    username: foo
    password: pass
    from: "/upload/dir1/file.txt"
    to: "/upload/dir2/file.txt"

from string required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
to string required

The full destination path (optionally including the filename). If it ends with a /, the destination is treated as a directory and the source filename is appended. If the destination file already exists, it is deleted first.

type const: "io.kestra.plugin.fs.ftp.Move" required
Constant: "io.kestra.plugin.fs.ftp.Move"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 21

Default: "21"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftp.Trigger object
Examples

Wait for one or more files in a given FTP server's directory and process each of these files sequentially.

id: ftp_trigger_flow
namespace: company.team

tasks:
  - id: for_each_file
    type: io.kestra.plugin.core.flow.EachSequential
    value: "{{ trigger.files }}"
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value | jq('.path') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.fs.ftp.Trigger
    host: localhost
    port: 21
    username: foo
    password: bar
    from: "/in/"
    interval: PT10S
    action: MOVE
    moveDirectory: "/archive/"

Wait for one or more files in a given FTP server's directory and process each of these files sequentially. Delete files manually after processing to prevent infinite triggering.

id: ftp_trigger_flow
namespace: company.team

tasks:
  - id: for_each_file
    type: io.kestra.plugin.core.flow.EachSequential
    value: "{{ trigger.files }}"
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value | jq('.name') }}"
      - id: delete
        type: io.kestra.plugin.fs.ftp.Delete
        host: localhost
        port: 21
        username: foo
        password: bar
        uri: "/in/{{ taskrun.value | jq('.name') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.fs.ftp.Trigger
    host: localhost
    port: 21
    username: foo
    password: bar
    from: "/in/"
    interval: PT10S
    action: NONE

Wait for one or more files in a given FTP server's directory and process each of these files sequentially. In this example, we restrict the trigger to only wait for CSV files in the mydir directory.

id: ftp_wait_for_csv_in_mydir
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    value: "{{ trigger.files }}"
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value | jq('.path') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.fs.ftp.Trigger
    host: localhost
    port: "21"
    username: foo
    password: bar
    from: "mydir/"
    regExp: ".*.csv"
    action: MOVE
    moveDirectory: "archive/"
    interval: PT10S

action string required
Values: "MOVE" "DELETE" "NONE"
from string required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.ftp.Trigger" required
Constant: "io.kestra.plugin.fs.ftp.Trigger"
conditions array
description string
disabled boolean

Default value is : false

Default: false
interval string

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
moveDirectory string
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 21

Default: "21"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
recursive boolean

Default value is : false

Default: false
regExp string
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftp.Upload object
Examples
id: fs_ftp_upload
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: upload
    type: io.kestra.plugin.fs.ftp.Upload
    host: localhost
    port: 21
    username: foo
    password: pass
    from: "{{ inputs.file }}"
    to: "/upload/dir2/file.txt"

from string required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.ftp.Upload" required
Constant: "io.kestra.plugin.fs.ftp.Upload"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 21

Default: "21"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
to string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftp.Uploads object
Examples
id: fs_ftp_uploads
namespace: company.team

inputs:
  - id: file1
    type: FILE
  - id: file2
    type: FILE

tasks:
  - id: uploads
    type: io.kestra.plugin.fs.ftp.Uploads
    host: localhost
    port: 21
    username: foo
    password: pass
    from:
      - "{{ inputs.file1 }}"
      - "{{ inputs.file2 }}"
    to: "/upload/dir2"

from string | string[] required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
to string required
type const: "io.kestra.plugin.fs.ftp.Uploads" required
Constant: "io.kestra.plugin.fs.ftp.Uploads"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 21

Default: "21"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftps.Delete object
Examples
id: fs_ftps_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.fs.ftps.Delete
    host: localhost
    port: 990
    username: foo
    password: pass
    uri: "/upload/dir1/file.txt"

host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.ftps.Delete" required
Constant: "io.kestra.plugin.fs.ftps.Delete"
uri string required
allowFailure boolean

Default value is : false

Default: false
dataChannelProtectionLevel string

Default value is : P

Default: "P"
Values: "C" "S" "E" "P"
description string
disabled boolean

Default value is : false

Default: false
errorOnMissing boolean

Default value is : false

Default: false
insecureTrustAllCertificates boolean

Note: This makes the SSL connection insecure, and should only be used for testing.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
mode string

Default value is : EXPLICIT

Default: "EXPLICIT"
Values: "IMPLICIT" "EXPLICIT"
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 990

Default: "990"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftps.Download object
Examples
id: fs_ftps_download
namespace: company.team

tasks:
  - id: download
    type: io.kestra.plugin.fs.ftps.Download
    host: localhost
    port: 990
    username: foo
    password: pass
    from: "/in/file.txt"

from string required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.ftps.Download" required
Constant: "io.kestra.plugin.fs.ftps.Download"
allowFailure boolean

Default value is : false

Default: false
dataChannelProtectionLevel string

Default value is : P

Default: "P"
Values: "C" "S" "E" "P"
description string
disabled boolean

Default value is : false

Default: false
insecureTrustAllCertificates boolean

Note: This makes the SSL connection insecure, and should only be used for testing.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
mode string

Default value is : EXPLICIT

Default: "EXPLICIT"
Values: "IMPLICIT" "EXPLICIT"
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 990

Default: "990"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftps.Downloads object
Examples

Download a list of files and move them to an archive folder

id: fs_ftps_downloads
namespace: company.team

tasks:
  - id: downloads
    type: io.kestra.plugin.fs.ftps.Downloads
    host: localhost
    port: 990
    username: foo
    password: pass
    from: "/in/"
    interval: PT10S
    action: MOVE
    moveDirectory: "/archive/"

from string required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.ftps.Downloads" required
Constant: "io.kestra.plugin.fs.ftps.Downloads"
action string
Values: "MOVE" "DELETE" "NONE"
allowFailure boolean

Default value is : false

Default: false
dataChannelProtectionLevel string

Default value is : P

Default: "P"
Values: "C" "S" "E" "P"
description string
disabled boolean

Default value is : false

Default: false
insecureTrustAllCertificates boolean

Note: This makes the SSL connection insecure, and should only be used for testing.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
mode string

Default value is : EXPLICIT

Default: "EXPLICIT"
Values: "IMPLICIT" "EXPLICIT"
moveDirectory string
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 990

Default: "990"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
recursive boolean

Default value is : false

Default: false
regExp string
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftps.List object
Examples
id: fs_ftps_list
namespace: company.team

tasks:
  - id: list
    type: io.kestra.plugin.fs.ftps.List
    host: localhost
    port: 990
    username: foo
    password: pass
    from: "/upload/dir1/"
    regExp: ".*\/dir1\/.*.(yaml|yml)"

from string required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.ftps.List" required
Constant: "io.kestra.plugin.fs.ftps.List"
allowFailure boolean

Default value is : false

Default: false
dataChannelProtectionLevel string

Default value is : P

Default: "P"
Values: "C" "S" "E" "P"
description string
disabled boolean

Default value is : false

Default: false
insecureTrustAllCertificates boolean

Note: This makes the SSL connection insecure, and should only be used for testing.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
mode string

Default value is : EXPLICIT

Default: "EXPLICIT"
Values: "IMPLICIT" "EXPLICIT"
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 990

Default: "990"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
recursive boolean

Default value is : false

Default: false
regExp string
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftps.Move object

If the destination directory doesn't exist, it will be created.

##### Examples

id: fs_ftps_move
namespace: company.team

tasks:
  - id: move
    type: io.kestra.plugin.fs.ftps.Move
    host: localhost
    port: 990
    username: foo
    password: pass
    from: "/upload/dir1/file.txt"
    to: "/upload/dir2/file.txt"

from string required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
to string required

The full destination path (optionally including the filename). If it ends with a /, the destination is treated as a directory and the source filename is appended. If the destination file already exists, it is deleted first.

type const: "io.kestra.plugin.fs.ftps.Move" required
Constant: "io.kestra.plugin.fs.ftps.Move"
allowFailure boolean

Default value is : false

Default: false
dataChannelProtectionLevel string

Default value is : P

Default: "P"
Values: "C" "S" "E" "P"
description string
disabled boolean

Default value is : false

Default: false
insecureTrustAllCertificates boolean

Note: This makes the SSL connection insecure, and should only be used for testing.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
mode string

Default value is : EXPLICIT

Default: "EXPLICIT"
Values: "IMPLICIT" "EXPLICIT"
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 990

Default: "990"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftps.Trigger object
Examples

Wait for one or more files in a given FTPS server's directory and process each of these files sequentially.

id: ftps_trigger_flow
namespace: company.team

tasks:
  - id: for_each_file
    type: io.kestra.plugin.core.flow.EachSequential
    value: "{{ trigger.files }}"
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value | jq('.path') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.fs.ftps.Trigger
    host: localhost
    port: 990
    username: foo
    password: bar
    from: "/in/"
    interval: PT10S
    action: MOVE
    moveDirectory: "/archive/"

Wait for one or more files in a given FTPS server's directory and process each of these files sequentially. In this example, we restrict the trigger to only wait for CSV files in the mydir directory.

id: ftp_wait_for_csv_in_mydir
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    value: "{{ trigger.files }}"
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value | jq('.path') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.fs.ftps.Trigger
    host: localhost
    port: "21"
    username: foo
    password: bar
    from: "mydir/"
    regExp: ".*.csv"
    action: MOVE
    moveDirectory: "archive/"
    interval: PT10S

action string required
Values: "MOVE" "DELETE" "NONE"
from string required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.ftps.Trigger" required
Constant: "io.kestra.plugin.fs.ftps.Trigger"
conditions array
dataChannelProtectionLevel string

Default value is : P

Default: "P"
Values: "C" "S" "E" "P"
description string
disabled boolean

Default value is : false

Default: false
insecureTrustAllCertificates boolean

Note: This makes the SSL connection insecure, and should only be used for testing.

interval string

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
mode string

Default value is : EXPLICIT

Default: "EXPLICIT"
Values: "IMPLICIT" "EXPLICIT"
moveDirectory string
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 990

Default: "990"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
recursive boolean

Default value is : false

Default: false
regExp string
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftps.Upload object
Examples
id: fs_ftps_upload
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: upload
    type: io.kestra.plugin.fs.ftps.Upload
    host: localhost
    port: 990
    username: foo
    password: pass
    from: "{{ inputs.file }}"
    to: "/upload/dir2/file.txt"

from string required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.ftps.Upload" required
Constant: "io.kestra.plugin.fs.ftps.Upload"
allowFailure boolean

Default value is : false

Default: false
dataChannelProtectionLevel string

Default value is : P

Default: "P"
Values: "C" "S" "E" "P"
description string
disabled boolean

Default value is : false

Default: false
insecureTrustAllCertificates boolean

Note: This makes the SSL connection insecure, and should only be used for testing.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
mode string

Default value is : EXPLICIT

Default: "EXPLICIT"
Values: "IMPLICIT" "EXPLICIT"
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 990

Default: "990"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
to string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ftps.Uploads object
Examples
id: fs_ftps_uploads
namespace: company.team

inputs:
  - id: file1
    type: FILE
  - id: file2
    type: FILE

tasks:
  - id: uploads
    type: io.kestra.plugin.fs.ftps.Uploads
    host: localhost
    port: 990
    username: foo
    password: pass
    from:
      - "{{ inputs.file1 }}"
      - "{{ inputs.file2 }}"
    to: "/upload/dir2"

from string | string[] required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
to string required
type const: "io.kestra.plugin.fs.ftps.Uploads" required
Constant: "io.kestra.plugin.fs.ftps.Uploads"
allowFailure boolean

Default value is : false

Default: false
dataChannelProtectionLevel string

Default value is : P

Default: "P"
Values: "C" "S" "E" "P"
description string
disabled boolean

Default value is : false

Default: false
insecureTrustAllCertificates boolean

Note: This makes the SSL connection insecure, and should only be used for testing.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
mode string

Default value is : EXPLICIT

Default: "EXPLICIT"
Values: "IMPLICIT" "EXPLICIT"
passiveMode boolean

Default value is : true

Default: true
password string
port string

Default value is : 990

Default: "990"
proxyHost string
proxyPort string
proxyType string
Values: "DIRECT" "HTTP" "SOCKS"
remoteIpVerification boolean

Default value is : true

Default: true
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.sftp.Delete object
Examples
id: fs_sftp_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.fs.sftp.Delete
    host: localhost
    port: "22"
    username: foo
    password: pass
    uri: "/upload/dir1/file.txt"

host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.sftp.Delete" required
Constant: "io.kestra.plugin.fs.sftp.Delete"
uri string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
errorOnMissing boolean

Default value is : false

Default: false
keyExchangeAlgorithm string
keyfile string

To generate a PEM format key from OpenSSH, use the following command: ssh-keygen -m PEM

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passphrase string
password string
port string

Default value is : 22

Default: "22"
proxyHost string
proxyPassword string
proxyPort string
proxyType string
proxyUser string
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.sftp.Download object
Examples
id: fs_sftp_download
namespace: company.team

tasks:
  - id: download
    type: io.kestra.plugin.fs.sftp.Download
    host: localhost
    port: "22"
    username: foo
    password: pass
    from: "/in/file.txt"

from string required
host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.sftp.Download" required
Constant: "io.kestra.plugin.fs.sftp.Download"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
keyExchangeAlgorithm string
keyfile string

To generate a PEM format key from OpenSSH, use the following command: ssh-keygen -m PEM

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passphrase string
password string
port string

Default value is : 22

Default: "22"
proxyHost string
proxyPassword string
proxyPort string
proxyType string
proxyUser string
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.sftp.Downloads object
Examples

Download a list of files and move them to an archive folder

id: fs_sftp_downloads
namespace: company.team

tasks:
  - id: downloads
    type: io.kestra.plugin.fs.sftp.Downloads
    host: localhost
    port: "22"
    username: foo
    password: pass
    from: "/in/"
    interval: PT10S
    action: MOVE
    moveDirectory: "/archive/"

from string required
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.sftp.Downloads" required
Constant: "io.kestra.plugin.fs.sftp.Downloads"
action string
Values: "MOVE" "DELETE" "NONE"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
keyExchangeAlgorithm string
keyfile string

To generate a PEM format key from OpenSSH, use the following command: ssh-keygen -m PEM

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
moveDirectory string
passphrase string
password string
port string

Default value is : 22

Default: "22"
proxyHost string
proxyPassword string
proxyPort string
proxyType string
proxyUser string
recursive boolean

Default value is : false

Default: false
regExp string
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.sftp.List object
Examples
id: fs_sftp_list
namespace: company.team

tasks:
  - id: list
    type: io.kestra.plugin.fs.sftp.List
    host: localhost
    port: "22"
    username: foo
    password: pass
    from: "/upload/dir1/"
    regExp: ".*\/dir1\/.*.(yaml|yml)"

from string required
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.sftp.List" required
Constant: "io.kestra.plugin.fs.sftp.List"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
keyExchangeAlgorithm string
keyfile string

To generate a PEM format key from OpenSSH, use the following command: ssh-keygen -m PEM

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passphrase string
password string
port string

Default value is : 22

Default: "22"
proxyHost string
proxyPassword string
proxyPort string
proxyType string
proxyUser string
recursive boolean

Default value is : false

Default: false
regExp string
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.sftp.Move object

If the destination directory doesn't exist, it will be created.

Examples

id: fs_sftp_move
namespace: company.team

tasks:
  - id: move
    type: io.kestra.plugin.fs.sftp.Move
    host: localhost
    port: "22"
    username: foo
    password: pass
    from: "/upload/dir1/file.txt"
    to: "/upload/dir2/file.txt"

from string required
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
to string required

The full destination path (optionally including the filename). If it ends with a /, the destination is treated as a directory and the source filename is appended. If the destination file already exists, it is deleted first.

type const: "io.kestra.plugin.fs.sftp.Move" required
Constant: "io.kestra.plugin.fs.sftp.Move"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
keyExchangeAlgorithm string
keyfile string

To generate a PEM format key from OpenSSH, use the following command: ssh-keygen -m PEM

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passphrase string
password string
port string

Default value is : 22

Default: "22"
proxyHost string
proxyPassword string
proxyPort string
proxyType string
proxyUser string
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.sftp.Trigger object
Examples

Wait for one or more files in a given SFTP server's directory and process each of these files sequentially.

id: sftp_trigger_flow
namespace: company.team

tasks:
  - id: for_each_file
    type: io.kestra.plugin.core.flow.EachSequential
    value: "{{ trigger.files }}"
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value | jq('.path') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.fs.sftp.Trigger
    host: localhost
    port: 6622
    username: foo
    password: bar
    from: "/in/"
    interval: PT10S
    action: MOVE
    moveDirectory: "/archive/"

Wait for one or more files in a given SFTP server's directory and process each of these files sequentially. Delete files manually after processing to prevent infinite triggering.

id: sftp_trigger_flow
namespace: company.team

tasks:
  - id: for_each_file
    type: io.kestra.plugin.core.flow.EachSequential
    value: "{{ trigger.files | jq('.path') }}"
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value }}"
      - id: delete
        type: io.kestra.plugin.fs.sftp.Delete
        host: localhost
        port: 6622
        username: foo
        password: bar
        uri: "/in/{{ taskrun.value }}"

triggers:
  - id: watch
    type: io.kestra.plugin.fs.sftp.Trigger
    host: localhost
    port: 6622
    username: foo
    password: bar
    from: "/in/"
    interval: PT10S
    action: NONE

Wait for one or more files in a given SFTP server's directory and process each of these files sequentially. In this example, we restrict the trigger to only wait for CSV files in the mydir directory.

id: ftp_wait_for_csv_in_mydir
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    value: "{{ trigger.files }}"
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value | jq('.path') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.fs.sftp.Trigger
    host: localhost
    port: "6622"
    username: foo
    password: bar
    from: "mydir/"
    regExp: ".*.csv"
    action: MOVE
    moveDirectory: "archive/"
    interval: PT10S

action string required
Values: "MOVE" "DELETE" "NONE"
from string required
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.sftp.Trigger" required
Constant: "io.kestra.plugin.fs.sftp.Trigger"
conditions array
description string
disabled boolean

Default value is : false

Default: false
interval string

Default value is : 60.000000000

Default: 60.0
format=duration
keyExchangeAlgorithm string
keyfile string

To generate a PEM format key from OpenSSH, use the following command: ssh-keygen -m PEM

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
moveDirectory string
passphrase string
password string
port string

Default value is : 22

Default: "22"
proxyHost string
proxyPassword string
proxyPort string
proxyType string
proxyUser string
recursive boolean

Default value is : false

Default: false
regExp string
rootDir boolean

Default value is : true

Default: true
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.sftp.Upload object
Examples
id: fs_sftp_upload
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: upload
    type: io.kestra.plugin.fs.sftp.Upload
    host: localhost
    port: "22"
    username: foo
    password: pass
    from: "{{ inputs.file }}"
    to: "/upload/dir2/file.txt"

from string required
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.sftp.Upload" required
Constant: "io.kestra.plugin.fs.sftp.Upload"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
keyExchangeAlgorithm string
keyfile string

To generate a PEM format key from OpenSSH, use the following command: ssh-keygen -m PEM

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passphrase string
password string
port string

Default value is : 22

Default: "22"
proxyHost string
proxyPassword string
proxyPort string
proxyType string
proxyUser string
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
to string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.sftp.Uploads object
Examples
id: fs_sftp_uploads
namespace: company.team

inputs:
  - id: file1
    type: FILE
  - id: file2
    type: FILE

tasks:
  - id: uploads
    type: io.kestra.plugin.fs.sftp.Uploads
    host: localhost
    port: "22"
    username: foo
    password: pass
    from:
      - "{{ inputs.file1 }}"
      - "{{ inputs.file2 }}"
    to: "/upload/dir2"

from string | string[] required
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
to string required
type const: "io.kestra.plugin.fs.sftp.Uploads" required
Constant: "io.kestra.plugin.fs.sftp.Uploads"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
keyExchangeAlgorithm string
keyfile string

To generate a PEM format key from OpenSSH, use the following command: ssh-keygen -m PEM

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passphrase string
password string
port string

Default value is : 22

Default: "22"
proxyHost string
proxyPassword string
proxyPort string
proxyType string
proxyUser string
rootDir boolean

Default value is : true

Default: true
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.smb.Delete object
Examples
id: fs_smb_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.fs.smb.Delete
    host: localhost
    port: 445
    username: foo
    password: pass
    uri: "/my_share/dir1/file.txt"

host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.smb.Delete" required
Constant: "io.kestra.plugin.fs.smb.Delete"
uri string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
errorOnMissing boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
port string

Default value is : 445

Default: "445"
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.smb.Download object
Examples
id: fs_smb_download
namespace: company.team

tasks:
  - id: download
    type: io.kestra.plugin.fs.smb.Download
    host: localhost
    port: 445
    username: foo
    password: pass
    from: "/my_share/file.txt"

from string required
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.smb.Download" required
Constant: "io.kestra.plugin.fs.smb.Download"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
port string

Default value is : 445

Default: "445"
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.smb.Downloads object
Examples

Download files from my_share and move them to an archive_share

id: fs_smb_downloads
namespace: company.team

tasks:
  - id: downloads
    type: io.kestra.plugin.fs.smb.Downloads
    host: localhost
    port: 445
    username: foo
    password: pass
    from: "/my_share/"
    interval: PT10S
    action: MOVE
    moveDirectory: "/archive_share/"

from string required
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.smb.Downloads" required
Constant: "io.kestra.plugin.fs.smb.Downloads"
action string
Values: "MOVE" "DELETE" "NONE"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
moveDirectory string
password string
port string

Default value is : 445

Default: "445"
recursive boolean

Default value is : false

Default: false
regExp string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.smb.List object
Examples
id: fs_smb_list
namespace: company.team

tasks:
  - id: list
    type: io.kestra.plugin.fs.smb.List
    host: localhost
    port: 445
    username: foo
    password: pass
    from: "/my_share/dir1/"
    regExp: ".*\/dir1\/.*.(yaml|yml)"

from string required
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.smb.List" required
Constant: "io.kestra.plugin.fs.smb.List"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
port string

Default value is : 445

Default: "445"
recursive boolean

Default value is : false

Default: false
regExp string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.smb.Move object

If the destination directory doesn't exist, it will be created.

Examples

id: fs_smb_move
namespace: company.team

tasks:
  - id: move
    type: io.kestra.plugin.fs.smb.Move
    host: localhost
    port: 445
    username: foo
    password: pass
    from: "/my_share/dir1/file.txt"
    to: "/my_share/dir2/file.txt"

from string required
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
to string required

The full destination path (optionally including the filename). If it ends with a /, the destination is treated as a directory and the source filename is appended. If the destination file already exists, it is deleted first.

type const: "io.kestra.plugin.fs.smb.Move" required
Constant: "io.kestra.plugin.fs.smb.Move"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
port string

Default value is : 445

Default: "445"
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.smb.Trigger object
Examples

Wait for one or more files in a given SMB server's directory and process each of these files sequentially. Then move them to another share which is used as an archive.

id: smb_trigger_flow
namespace: company.team

tasks:
  - id: for_each_file
    type: io.kestra.plugin.core.flow.EachSequential
    value: "{{ trigger.files }}"
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value | jq('.path') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.fs.smb.Trigger
    host: localhost
    port: 445
    username: foo
    password: bar
    from: "/my_share/in/"
    interval: PT10S
    action: MOVE
    moveDirectory: "/archive_share/"

Wait for one or more files in a given SMB server's directory and process each of these files sequentially. Then move them to another share which is used as an archive.

id: smb_trigger_flow
namespace: company.team

tasks:
  - id: for_each_file
    type: io.kestra.plugin.core.flow.EachSequential
    value: "{{ trigger.files }}"
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value | jq('.path') }}"
      - id: delete
        type: io.kestra.plugin.fs.smb.Delete
        host: localhost
        port: 445
        username: foo
        password: bar
        uri: "/my_share/in/{{ taskrun.value | jq('.path') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.fs.smb.Trigger
    host: localhost
    port: 445
    username: foo
    password: bar
    from: "/my_share/in/"
    interval: PT10S
    action: NONE

Wait for one or more files in a given SMB server's directory (composed of share name followed by dir path) and process each of these files sequentially. In this example, we restrict the trigger to only wait for CSV files in the mydir directory.

id: smb_wait_for_csv_in_my_share_my_dir
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    value: "{{ trigger.files }}"
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value | jq('.path') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.fs.smb.Trigger
    host: localhost
    port: "445"
    username: foo
    password: bar
    from: "my_share/mydir/"
    regExp: ".*.csv"
    action: MOVE
    moveDirectory: "my_share/archivedir"
    interval: PT10S

action string required
Values: "MOVE" "DELETE" "NONE"
from string required
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.smb.Trigger" required
Constant: "io.kestra.plugin.fs.smb.Trigger"
conditions array
description string
disabled boolean

Default value is : false

Default: false
interval string

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
moveDirectory string
password string
port string

Default value is : 445

Default: "445"
recursive boolean

Default value is : false

Default: false
regExp string
stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.smb.Upload object
Examples
id: fs_smb_upload
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: upload
    type: io.kestra.plugin.fs.smb.Upload
    host: localhost
    port: 445
    username: foo
    password: pass
    from: "{{ inputs.file }}"
    to: "/my_share/dir2/file.txt"

from string required
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.smb.Upload" required
Constant: "io.kestra.plugin.fs.smb.Upload"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
port string

Default value is : 445

Default: "445"
timeout string
format=duration
to string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.smb.Uploads object
Examples
id: fs_smb_uploads
namespace: company.team

inputs:
  - id: file1
    type: FILE
  - id: file2
    type: FILE

tasks:
  - id: uploads
    type: io.kestra.plugin.fs.smb.Uploads
    host: localhost
    port: 445
    username: foo
    password: pass
    from:
      - "{{ inputs.file1 }}"
      - "{{ inputs.file2 }}"
    to: "/my_share/dir2"

from string | string[] required
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
to string required
type const: "io.kestra.plugin.fs.smb.Uploads" required
Constant: "io.kestra.plugin.fs.smb.Uploads"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
port string

Default value is : 445

Default: "445"
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.fs.ssh.Command object
Examples

Run SSH command using password authentication

id: fs_ssh_command
namespace: company.team

tasks:
  - id: command
    type: io.kestra.plugin.fs.ssh.Command
    host: localhost
    port: "22"
    authMethod: PASSWORD
    username: foo
    password: pass
    commands: ['ls']

Run SSH command using public key authentication (must be an OpenSSH private key)

id: fs_ssh_command
namespace: company.team

tasks:
  - id: command
    type: io.kestra.plugin.fs.ssh.Command
    host: localhost
    port: "22"
    authMethod: PUBLIC_KEY
    username: root
    privateKey: "{{ secret('SSH_RSA_PRIVATE_KEY') }}"
    commands: ['touch kestra_was_here']

Run SSH command using the local OpenSSH configuration

id: ssh
namespace: company.team
tasks:
  - id: ssh
    type: io.kestra.plugin.fs.ssh.Command
    authMethod: OPEN_SSH
    useOpenSSHConfig: true
    host: localhost
    password: pass.
    commands:
      - echo "Hello World"
commands string[] required
minItems=1
host string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.fs.ssh.Command" required
Constant: "io.kestra.plugin.fs.ssh.Command"
allowFailure boolean

Default value is : false

Default: false
authMethod string

Default value is : PASSWORD

Default: "PASSWORD"
Values: "PASSWORD" "PUBLIC_KEY" "OPEN_SSH"
description string
disabled boolean

Default value is : false

Default: false
enableSshRsa1 boolean

Default value is : false

Default: false
env Record<string, string>
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
openSSHConfigDir string

Default value is : "~/.ssh/config"

Default: "~/.ssh/config"
password string
port string

Default value is : 22

Default: "22"
privateKey string
privateKeyPassphrase string
strictHostKeyChecking string

Default value is : "no"

Default: "no"
timeout string
format=duration
username string
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.auth.OauthAccessToken object
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.auth.OauthAccessToken" required
Constant: "io.kestra.plugin.gcp.auth.OauthAccessToken"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.bigquery.AbstractLoad-AvroOptions object
useAvroLogicalTypes boolean

The value may be null.

io.kestra.plugin.gcp.bigquery.AbstractLoad-CsvOptions object
allowJaggedRows boolean

If true, BigQuery treats missing trailing columns as null values. If {@code false}, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. By default, rows with missing trailing columns are considered bad records.

allowQuotedNewLines boolean

By default quoted newline are not allowed.

encoding string

The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values set in {@link #setQuote(String)} and {@link #setFieldDelimiter(String)}.

fieldDelimiter string

BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').

quote string

BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set {@link #setAllowQuotedNewLines(boolean)} property to {@code true}.

skipLeadingRows integer

The default value is 0. This property is useful if you have header rows in the file that should be skipped.

io.kestra.plugin.gcp.bigquery.Copy object
Examples
id: gcp_bq_copy
namespace: company.team

tasks:
  - id: copy
    type: io.kestra.plugin.gcp.bigquery.Copy
    operationType: COPY
    sourceTables:
      - "my_project.my_dataset.my_table$20130908"
    destinationTable: "my_project.my_dataset.my_table"

destinationTable string required

If not provided a new table is created.

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
operationType string required
  • COPY: The source and destination table have the same table type.
  • SNAPSHOT: The source table type is TABLE and the destination table type is SNAPSHOT.
  • RESTORE: The source table type is SNAPSHOT and the destination table type is TABLE.
  • CLONE: The source and destination table have the same table type, but only bill for unique data.
Values: "COPY" "SNAPSHOT" "RESTORE" "CLONE"
sourceTables string[] required

Can be table or partitions.

type const: "io.kestra.plugin.gcp.bigquery.Copy" required
Constant: "io.kestra.plugin.gcp.bigquery.Copy"
allowFailure boolean

Default value is : false

Default: false
createDisposition string
Values: "CREATE_IF_NEEDED" "CREATE_NEVER"
description string
disabled boolean

Default value is : false

Default: false
dryRun boolean

A valid query will mostly return an empty response with some processing statistics, while an invalid query will return the same error as it would if it were an actual run.

Default value is : false

Default: false
jobTimeout string

If this time limit is exceeded, BigQuery may attempt to terminate the job.

format=duration
labels object

You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.

location string

This property is experimental and might be subject to change or removed.

See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
writeDisposition string
Values: "WRITE_TRUNCATE" "WRITE_APPEND" "WRITE_EMPTY"
io.kestra.plugin.gcp.bigquery.CopyPartitions object
Examples
id: gcp_bq_copy_partitions
namespace: company.team

tasks:
  - id: copy_partitions
    type: io.kestra.plugin.gcp.bigquery.CopyPartitions
    projectId: my-project
    dataset: my-dataset
    table: my-table
    destinationTable: my-dest-table
    partitionType: DAY
    from: "{{ now() | dateAdd(-30, 'DAYS') }}"
    to: "{{ now() | dateAdd(-7, 'DAYS') }}"

dataset string required
from string required

If the partition :

  • is a numeric range, must be a valid integer
  • is a date, must be a valid datetime like {{ now() }}
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
partitionType string required
Values: "DAY" "HOUR" "MONTH" "YEAR" "RANGE"
table string required
to string required

If the partition :

  • is a numeric range, must be a valid integer
  • is a date, must be a valid datetime like {{ now() }}
type const: "io.kestra.plugin.gcp.bigquery.CopyPartitions" required
Constant: "io.kestra.plugin.gcp.bigquery.CopyPartitions"
allowFailure boolean

Default value is : false

Default: false
createDisposition string
Values: "CREATE_IF_NEEDED" "CREATE_NEVER"
description string
destinationTable string

If not provided, a new table is created.

disabled boolean

Default value is : false

Default: false
dryRun boolean

A valid query will mostly return an empty response with some processing statistics, while an invalid query will return the same error as it would if it were an actual run.

Default value is : false

Default: false
jobTimeout string

If this time limit is exceeded, BigQuery may attempt to terminate the job.

format=duration
labels object

You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.

location string

This property is experimental and might be subject to change or removed.

See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
writeDisposition string
Values: "WRITE_TRUNCATE" "WRITE_APPEND" "WRITE_EMPTY"
io.kestra.plugin.gcp.bigquery.CreateDataset object
Examples

Create a dataset if it does not exist

id: gcp_bq_create_dataset
namespace: company.team

tasks:
  - id: create_dataset
    type: io.kestra.plugin.gcp.bigquery.CreateDataset
    name: "my_dataset"
    location: "EU"
    ifExists: "SKIP"

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
type const: "io.kestra.plugin.gcp.bigquery.CreateDataset" required
Constant: "io.kestra.plugin.gcp.bigquery.CreateDataset"
acl array
allowFailure boolean

Default value is : false

Default: false
defaultEncryptionConfiguration
All of: com.google.cloud.bigquery.EncryptionConfiguration object, The default encryption key for all tables in the dataset.
defaultPartitionExpirationMs integer

Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs property in the timePartitioning settings set to this value. Changing the value only affects new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property. The value may be null.

defaultTableLifetime integer

The minimum value is 3600000 milliseconds (one hour). Once this property is set, all newly-created tables in the dataset will have an expirationTime property set to the creation time plus the value in this property, and changing the value will only affect new tables, not existing ones. When the expirationTime for a given table is reached, that table will be deleted automatically. If a table's expirationTime is modified or removed before the table expires, or if you provide an explicit expirationTime when creating a table, that value takes precedence over the default expiration time indicated by this property. This property is experimental and might be subject to change or removed.

description string

A user-friendly description for the dataset.

disabled boolean

Default value is : false

Default: false
friendlyName string
ifExists string

Default value is : ERROR

Default: "ERROR"
Values: "ERROR" "UPDATE" "SKIP"
labels object
location string

This property is experimental and might be subject to change or removed. See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.bigquery.CreateTable object
Examples
id: gcp_bq_create_table
namespace: company.team

tasks:
  - id: create_table
    type: io.kestra.plugin.gcp.bigquery.CreateTable
    projectId: my-project
    dataset: my-dataset
    table: my-table
    tableDefinition:
      type: TABLE
      schema:
        fields:
        - name: id
          type: INT64
        - name: name
          type: STRING
      standardTableDefinition:
        clustering:
        - id
        - name
    friendlyName: new_table

dataset string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
table string required
type const: "io.kestra.plugin.gcp.bigquery.CreateTable" required
Constant: "io.kestra.plugin.gcp.bigquery.CreateTable"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
encryptionConfiguration
All of: io.kestra.plugin.gcp.bigquery.models.EncryptionConfiguration object, The encryption configuration.
expirationDuration string

If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.

format=duration
friendlyName string
labels object
location string

This property is experimental and might be subject to change or removed.

See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
requirePartitionFilter boolean
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
tableDefinition
All of: io.kestra.plugin.gcp.bigquery.models.TableDefinition object, The table definition.
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.bigquery.DeleteDataset object
Examples

Delete a dataset.

id: gcp_bq_delete_dataset
namespace: company.team

tasks:
  - id: delete_dataset
    type: io.kestra.plugin.gcp.bigquery.DeleteDataset
    name: "my-dataset"
    deleteContents: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
type const: "io.kestra.plugin.gcp.bigquery.DeleteDataset" required
Constant: "io.kestra.plugin.gcp.bigquery.DeleteDataset"
allowFailure boolean

Default value is : false

Default: false
deleteContents boolean

If not provided, attempting to delete a non-empty dataset will result in an exception being thrown.

description string
disabled boolean

Default value is : false

Default: false
location string

This property is experimental and might be subject to change or removed.

See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.bigquery.DeletePartitions object
Examples
id: gcp_bq_delete_partitions
namespace: company.team

tasks:
  - id: delete_partitions
    type: io.kestra.plugin.gcp.bigquery.DeletePartitions
    projectId: my-project
    dataset: my-dataset
    table: my-table
    partitionType: DAY
    from: "{{ now() | dateAdd(-30, 'DAYS') }}"
    to: "{{ now() | dateAdd(-7, 'DAYS') }}"

dataset string required
from string required

If the partition:

  • is a numeric range, must be a valid integer
  • is a date, must be a valid datetime like {{ now() }}
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
partitionType string required
Values: "DAY" "HOUR" "MONTH" "YEAR" "RANGE"
table string required
to string required

If the partition:

  • is a numeric range, must be a valid integer
  • is a date, must be a valid datetime like {{ now() }}
type const: "io.kestra.plugin.gcp.bigquery.DeletePartitions" required
Constant: "io.kestra.plugin.gcp.bigquery.DeletePartitions"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
location string

This property is experimental and might be subject to change or removed.

See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.bigquery.DeleteTable object
Examples

Delete a partition

id: gcp_bq_delete_table
namespace: company.team

tasks:
  - id: delete_table
    type: io.kestra.plugin.gcp.bigquery.DeleteTable
    projectId: my-project
    dataset: my-dataset
    table: my-table$20130908

dataset string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
table string required
type const: "io.kestra.plugin.gcp.bigquery.DeleteTable" required
Constant: "io.kestra.plugin.gcp.bigquery.DeleteTable"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
location string

This property is experimental and might be subject to change or removed.

See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.bigquery.ExtractToGcs object
Examples

Extract a BigQuery table to a GCS bucket.

id: gcp_bq_extract_to_gcs
namespace: company.team

tasks:
  - id: extract_to_gcs
    type: io.kestra.plugin.gcp.bigquery.ExtractToGcs
    destinationUris:
      - "gs://bucket_name/filename.csv"
    sourceTable: "my_project.my_dataset.my_table"
    format: CSV
    fieldDelimiter: ';'
    printHeader: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.bigquery.ExtractToGcs" required
Constant: "io.kestra.plugin.gcp.bigquery.ExtractToGcs"
allowFailure boolean

Default value is : false

Default: false
compression string
description string
destinationUris string[]
disabled boolean

Default value is : false

Default: false
fieldDelimiter string
format string
jobTimeoutMs integer
labels object

The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key. Parameters: labels - labels or null for none

location string

This property is experimental and might be subject to change or removed.

See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
printHeader boolean
projectId string
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
sourceTable string
timeout string
format=duration
useAvroLogicalTypes boolean

[Optional] If destinationFormat is set to "AVRO", this flag indicates whether to enable extracting applicable column types (such as TIMESTAMP) to their corresponding AVRO logical types (timestamp-micros), instead of only using their raw types (avro-long).

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.bigquery.Load object
Examples

Load a CSV file from an input file

id: gcp_bq_load
namespace: company.team

tasks:
  - id: load
    type: io.kestra.plugin.gcp.bigquery.Load
    from: "{{ inputs.file }}"
    destinationTable: "my_project.my_dataset.my_table"
    format: CSV
    csvOptions:
      fieldDelimiter: ";"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.bigquery.Load" required
Constant: "io.kestra.plugin.gcp.bigquery.Load"
allowFailure boolean

Default value is : false

Default: false
autodetect boolean
avroOptions
All of: io.kestra.plugin.gcp.bigquery.AbstractLoad-AvroOptions object, Avro parsing options.
clusteringFields string[]
createDisposition string
Values: "CREATE_IF_NEEDED" "CREATE_NEVER"
csvOptions
All of: io.kestra.plugin.gcp.bigquery.AbstractLoad-CsvOptions object, Csv parsing options.
description string
destinationTable string

If not provided, a new table is created.

disabled boolean

Default value is : false

Default: false
failedOnEmpty boolean

Default value is : true

Default: true
format string
Values: "CSV" "JSON" "AVRO" "PARQUET" "ORC"
from string
ignoreUnknownValues boolean

If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. By default unknown values are not allowed.

location string

This property is experimental and might be subject to change or removed.

See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxBadRecords integer

If the number of bad records exceeds this value, an invalid error is returned in the job result. By default, no bad record is ignored.

projectId string
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
schema object

The schema can be omitted if the destination table already exists, or if you're loading data from a Google Cloud Datastore backup (i.e. DATASTORE_BACKUP format option).

schema:
  fields:
    - name: colA
      type: STRING
    - name: colB
      type: NUMERIC

See type from StandardSQLTypeName

schemaUpdateOptions string[]

Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema.

scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timePartitioningField string
timePartitioningType string

Default value is : DAY

Default: "DAY"
Values: "DAY" "HOUR" "MONTH" "YEAR"
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
writeDisposition string
Values: "WRITE_TRUNCATE" "WRITE_APPEND" "WRITE_EMPTY"
io.kestra.plugin.gcp.bigquery.LoadFromGcs object
Examples

Load an Avro file from a GCS bucket

id: gcp_bq_load_from_gcs
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/orders.csv

  - id: csv_to_ion
    type: io.kestra.plugin.serdes.csv.CsvToIon
    from: "{{ outputs.http_download.uri }}"
    header: true

  - id: ion_to_avro
    type: io.kestra.plugin.serdes.avro.IonToAvro
    from: "{{ outputs.csv_to_ion.uri }}"
    schema: |
      {
        "type": "record",
        "name": "Order",
        "namespace": "com.example.order",
        "fields": [
          {"name": "order_id", "type": "int"},
          {"name": "customer_name", "type": "string"},
          {"name": "customer_email", "type": "string"},
          {"name": "product_id", "type": "int"},
          {"name": "price", "type": "double"},
          {"name": "quantity", "type": "int"},
          {"name": "total", "type": "double"}
        ]
      }

  - id: load_from_gcs
    type: io.kestra.plugin.gcp.bigquery.LoadFromGcs
    from:
      - "{{ outputs.ion_to_avro.uri }}"
    destinationTable: "my_project.my_dataset.my_table"
    format: AVRO
    avroOptions:
      useAvroLogicalTypes: true

Load a csv file with a defined schema

id: gcp_bq_load_files_test
namespace: company.team

tasks:
  - id: load_files_test
    type: io.kestra.plugin.gcp.bigquery.LoadFromGcs
    destinationTable: "myDataset.myTable"
    ignoreUnknownValues: true
    schema:
      fields:
        - name: colA
          type: STRING
        - name: colB
          type: NUMERIC
        - name: colC
          type: STRING
    format: CSV
    csvOptions:
      allowJaggedRows: true
      encoding: UTF-8
      fieldDelimiter: ","
    from:
      - gs://myBucket/myFile.csv

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.bigquery.LoadFromGcs" required
Constant: "io.kestra.plugin.gcp.bigquery.LoadFromGcs"
allowFailure boolean

Default value is : false

Default: false
autodetect boolean
avroOptions
All of: io.kestra.plugin.gcp.bigquery.AbstractLoad-AvroOptions object, Avro parsing options.
clusteringFields string[]
createDisposition string
Values: "CREATE_IF_NEEDED" "CREATE_NEVER"
csvOptions
All of: io.kestra.plugin.gcp.bigquery.AbstractLoad-CsvOptions object, Csv parsing options.
description string
destinationTable string

If not provided, a new table is created.

disabled boolean

Default value is : false

Default: false
format string
Values: "CSV" "JSON" "AVRO" "PARQUET" "ORC"
from string[]

The fully-qualified URIs that point to source data in Google Cloud Storage (e.g. gs://bucket/path). Each URI can contain one '*' wildcard character and it must come after the 'bucket' name.

ignoreUnknownValues boolean

If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. By default unknown values are not allowed.

location string

This property is experimental and might be subject to change or removed.

See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxBadRecords integer

If the number of bad records exceeds this value, an invalid error is returned in the job result. By default, no bad record is ignored.

projectId string
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
schema object

The schema can be omitted if the destination table already exists, or if you're loading data from a Google Cloud Datastore backup (i.e. DATASTORE_BACKUP format option).

schema:
  fields:
    - name: colA
      type: STRING
    - name: colB
      type: NUMERIC

See type from StandardSQLTypeName

schemaUpdateOptions string[]

Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema.

scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timePartitioningField string
timePartitioningType string

Default value is : DAY

Default: "DAY"
Values: "DAY" "HOUR" "MONTH" "YEAR"
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
writeDisposition string
Values: "WRITE_TRUNCATE" "WRITE_APPEND" "WRITE_EMPTY"
io.kestra.plugin.gcp.bigquery.Query object
Examples

Create a table with a custom query.

id: gcp_bq_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.gcp.bigquery.Query
    destinationTable: "my_project.my_dataset.my_table"
    writeDisposition: WRITE_APPEND
    sql: |
      SELECT
        "hello" as string,
        NULL AS `nullable`,
        1 as int,
        1.25 AS float,
        DATE("2008-12-25") AS date,
        DATETIME "2008-12-25 15:30:00.123456" AS datetime,
        TIME(DATETIME "2008-12-25 15:30:00.123456") AS time,
        TIMESTAMP("2008-12-25 15:30:00.123456") AS timestamp,
        ST_GEOGPOINT(50.6833, 2.9) AS geopoint,
        ARRAY(SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3) AS `array`,
        STRUCT(4 AS x, 0 AS y, ARRAY(SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3) AS z) AS `struct`

Execute a query and fetch results sets on another task.

id: gcp_bq_query
namespace: company.team

tasks:
  - id: fetch
    type: io.kestra.plugin.gcp.bigquery.Query
    fetch: true
    sql: |
      SELECT 1 as id, "John" as name
      UNION ALL
      SELECT 2 as id, "Doe" as name
  - id: use_fetched_data
    type: io.kestra.plugin.core.debug.Return
    format: |
      {% for row in outputs.fetch.rows %}
      id : {{ row.id }}, name: {{ row.name }}
      {% endfor %}

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.bigquery.Query" required
Constant: "io.kestra.plugin.gcp.bigquery.Query"
allowFailure boolean

Default value is : false

Default: false
allowLargeResults boolean

If true the query is allowed to create large results at a slight cost in performance. destinationTable must be provided.

clusteringFields string[]
createDisposition string
Values: "CREATE_IF_NEEDED" "CREATE_NEVER"
defaultDataset string

This dataset is used for all unqualified table names used in the query.

description string
destinationTable string

If not provided, a new table is created.

disabled boolean

Default value is : false

Default: false
dryRun boolean

A valid query will mostly return an empty response with some processing statistics, while an invalid query will return the same error as it would if it were an actual run.

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
flattenResults boolean

If set to false, allowLargeResults must be true.

Default value is : true

Default: true
jobTimeout string

If this time limit is exceeded, BigQuery may attempt to terminate the job.

format=duration
labels object

You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.

legacySql boolean

By default this property is set to false.

Default value is : false

Default: false
location string

This property is experimental and might be subject to change or removed.

See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxResults integer

The maximum number of rows of data to return per page of results. Setting this flag to a small value such as 1000 and then paging through results might improve reliability when the query result set is large. In addition to this limit, responses are also limited to 10 MB. By default, there is no maximum row count, and only the byte limit applies.

maximumBillingTier integer

Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.

maximumBytesBilled integer

Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.

priority string

Default value is : INTERACTIVE

Default: "INTERACTIVE"
Values: "INTERACTIVE" "BATCH"
projectId string
rangePartitioningEnd integer
rangePartitioningField string
rangePartitioningInterval integer
rangePartitioningStart integer
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
schemaUpdateOptions string[]

Schema update options are supported in two cases:

  • when writeDisposition is WRITE_APPEND;
  • when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema.
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
sql string
store boolean

Default value is : false

Default: false
timePartitioningField string
timePartitioningType string

Default value is : DAY

Default: "DAY"
Values: "DAY" "HOUR" "MONTH" "YEAR"
timeout string
format=duration
useLegacySql boolean

A valid query will return a mostly empty response with some processing statistics, while an invalid query will return the same error it would if it wasn't a dry run.

Default value is : false

Default: false
useQueryCache boolean

The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when destinationTable is not set

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
writeDisposition string
Values: "WRITE_TRUNCATE" "WRITE_APPEND" "WRITE_EMPTY"
io.kestra.plugin.gcp.bigquery.StorageWrite object
Examples
id: gcp_bq_storage_write
namespace: company.team

tasks:
  - id: read_data
    type: io.kestra.plugin.core.http.Download
    uri: https://dummyjson.com/products/1

  - id: storage_write
    type: io.kestra.plugin.gcp.bigquery.StorageWrite
    from: "{{ outputs.read_data.uri }}"
    destinationTable: "my_project.my_dataset.my_table"
    writeStreamType: DEFAULT

destinationTable string required

The table must be created before.

minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.bigquery.StorageWrite" required
Constant: "io.kestra.plugin.gcp.bigquery.StorageWrite"
allowFailure boolean

Default value is : false

Default: false
bufferSize integer

Default value is : 1000

Default: 1000
description string
disabled boolean

Default value is : false

Default: false
from string
location string

This property is experimental and might be subject to change or removed.

See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
writeStreamType string

Default value is : DEFAULT

Default: "DEFAULT"
Values: "DEFAULT" "COMMITTED" "PENDING"
io.kestra.plugin.gcp.bigquery.TableMetadata object
Examples
id: gcp_bq_table_metadata
namespace: company.team

tasks:
  - id: table_metadata
    type: io.kestra.plugin.gcp.bigquery.TableMetadata
    projectId: my-project
    dataset: my-dataset
    table: my-table

dataset string required
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
table string required
type const: "io.kestra.plugin.gcp.bigquery.TableMetadata" required
Constant: "io.kestra.plugin.gcp.bigquery.TableMetadata"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
ifNotExists string

If the policy is SKIP, the output will contain only null values, otherwise an error is raised.

Default value is : ERROR

Default: "ERROR"
Values: "ERROR" "SKIP"
location string

This property is experimental and might be subject to change or removed.

See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.bigquery.Trigger object
Examples

Wait for a SQL query to return results and iterate through the rows.

id: bigquery-listen
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.gcp.bigquery.Trigger
    interval: "PT5M"
    sql: "SELECT * FROM `myproject.mydataset.mytable`"
    fetch: true
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.bigquery.Trigger" required
Constant: "io.kestra.plugin.gcp.bigquery.Trigger"
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
interval string

The interval between 2 different polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval should be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
legacySql boolean

By default this property is set to false.

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.bigquery.UpdateDataset object
Examples
id: gcp_bq_update_dataset
namespace: company.team

tasks:
  - id: update_dataset
    type: io.kestra.plugin.gcp.bigquery.UpdateDataset
    name: "my_dataset"
    location: "EU"
    friendlyName: "new Friendly Name"

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
type const: "io.kestra.plugin.gcp.bigquery.UpdateDataset" required
Constant: "io.kestra.plugin.gcp.bigquery.UpdateDataset"
acl array
allowFailure boolean

Default value is : false

Default: false
defaultEncryptionConfiguration
All of: com.google.cloud.bigquery.EncryptionConfiguration object, The default encryption key for all tables in the dataset.
defaultPartitionExpirationMs integer

Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs property in the timePartitioning settings set to this value. Changing the value only affects new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property. The value may be null.

defaultTableLifetime integer

The minimum value is 3600000 milliseconds (one hour). Once this property is set, all newly-created tables in the dataset will have an expirationTime property set to the creation time plus the value in this property, and changing the value will only affect new tables, not existing ones. When the expirationTime for a given table is reached, that table will be deleted automatically. If a table's expirationTime is modified or removed before the table expires, or if you provide an explicit expirationTime when creating a table, that value takes precedence over the default expiration time indicated by this property. This property is experimental and might be subject to change or removed.

description string

A user-friendly description for the dataset.

disabled boolean

Default value is : false

Default: false
friendlyName string
labels object
location string

This property is experimental and might be subject to change or removed. See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.bigquery.UpdateTable object
Examples
id: gcp_bq_update_table
namespace: company.team

tasks:
  - id: update_table
    type: io.kestra.plugin.gcp.bigquery.UpdateTable
    projectId: my-project
    dataset: my-dataset
    table: my-table
    expirationDuration: PT48H

dataset string required
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
table string required
type const: "io.kestra.plugin.gcp.bigquery.UpdateTable" required
Constant: "io.kestra.plugin.gcp.bigquery.UpdateTable"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
encryptionConfiguration
All of: io.kestra.plugin.gcp.bigquery.models.EncryptionConfiguration object, The encryption configuration.
expirationDuration string

If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.

format=duration
friendlyName string
labels object
location string

This property is experimental and might be subject to change or removed.

See Dataset Location

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
requirePartitionFilter boolean
retryAuto
retryMessages string[]

Message is tested as a substring of the full message, and is case insensitive.

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`

Default value is : `- due to concurrent update

  • Retrying the job may solve the problem`
Default:
[
  "due to concurrent update",
  "Retrying the job may solve the problem"
]
retryReasons string[]

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`

Default value is : `- rateLimitExceeded

  • jobBackendError
  • internalError
  • jobInternalError`
Default:
[
  "rateLimitExceeded",
  "jobBackendError",
  "internalError",
  "jobInternalError"
]
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
tableDefinition
All of: io.kestra.plugin.gcp.bigquery.models.TableDefinition object, The table definition.
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.bigquery.models.AccessControl object
entity required
All of: io.kestra.plugin.gcp.bigquery.models.Entity object, The GCP entity.
role string required
Values: "READER" "WRITER" "OWNER"
io.kestra.plugin.gcp.bigquery.models.EncryptionConfiguration object
The KMS key name. string
io.kestra.plugin.gcp.bigquery.models.Entity object
type string required
Values: "DOMAIN" "GROUP" "USER" "IAM_MEMBER"
value string required

For example, user email if the type is USER.

io.kestra.plugin.gcp.bigquery.models.ExternalTableDefinition object
autodetect boolean
compression string
formatType string
Values: "CSV" "JSON" "BIGTABLE" "DATASTORE_BACKUP" "AVRO" "GOOGLE_SHEETS" "PARQUET" "ORC"
ignoreUnknownValues boolean

If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result.

maxBadRecords integer

If the number of bad records exceeds this value, an invalid error is returned in the job result.

sourceUris string[]

Each URI can contain one '*' wildcard character that must come after the bucket's name. Size limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs.
io.kestra.plugin.gcp.bigquery.models.Field object
description string
mode string

By default, Field.Mode.NULLABLE is used.

Values: "NULLABLE" "REQUIRED" "REPEATED"
name string
policyTags
All of: io.kestra.plugin.gcp.bigquery.models.PolicyTags object, The policy tags for the field.
subFields array
type string
Values: "BOOL" "INT64" "FLOAT64" "NUMERIC" "BIGNUMERIC" "STRING" "BYTES" "STRUCT" "ARRAY" "TIMESTAMP" "DATE" "TIME" "DATETIME" "GEOGRAPHY" "JSON" "INTERVAL" "RANGE"
io.kestra.plugin.gcp.bigquery.models.MaterializedViewDefinition object
enableRefresh boolean
lastRefreshDate string
format=date-time
query string
refreshInterval string
format=duration
io.kestra.plugin.gcp.bigquery.models.PolicyTags object
The policy tags' names. string[]
io.kestra.plugin.gcp.bigquery.models.RangePartitioning object
The range of range partitioning. object
3 nested properties
The end of range partitioning. integer
The start of range partitioning. integer
The width of each interval. integer
The range partitioning field. string
io.kestra.plugin.gcp.bigquery.models.RangePartitioning-Range object
The end of range partitioning. integer
The start of range partitioning. integer
The width of each interval. integer
io.kestra.plugin.gcp.bigquery.models.Schema object
The fields associated with this schema. io.kestra.plugin.gcp.bigquery.models.Field[]
io.kestra.plugin.gcp.bigquery.models.StandardTableDefinition object
clustering string[]
rangePartitioning
All of: io.kestra.plugin.gcp.bigquery.models.RangePartitioning object, Returns the range partitioning configuration for this table. If {@code null}, the table is not range-partitioned.
streamingBuffer
All of: com.google.cloud.bigquery.StandardTableDefinition-StreamingBuffer object, Returns information on the table's streaming buffer, if exists. Returns {@code null} if no streaming buffer exists.
timePartitioning
All of: io.kestra.plugin.gcp.bigquery.models.TimePartitioning object, Returns the time partitioning configuration for this table. If {@code null}, the table is not time-partitioned.
io.kestra.plugin.gcp.bigquery.models.TableDefinition object
externalTableDefinition
All of: io.kestra.plugin.gcp.bigquery.models.ExternalTableDefinition object, The external table definition if the type is `EXTERNAL`.
materializedViewDefinition
All of: io.kestra.plugin.gcp.bigquery.models.MaterializedViewDefinition object, The materialized view definition if the type is `MATERIALIZED_VIEW`.
schema
All of: io.kestra.plugin.gcp.bigquery.models.Schema object, The table's schema.
standardTableDefinition
All of: io.kestra.plugin.gcp.bigquery.models.StandardTableDefinition object, The table definition if the type is `TABLE`.
type string
Values: "TABLE" "VIEW" "MATERIALIZED_VIEW" "EXTERNAL" "MODEL"
viewDefinition
All of: io.kestra.plugin.gcp.bigquery.models.ViewDefinition object, The view definition if the type is `VIEW`.
io.kestra.plugin.gcp.bigquery.models.TimePartitioning object
expiration string
format=duration
field string
requirePartitionFilter boolean
type string
Values: "DAY" "HOUR" "MONTH" "YEAR"
io.kestra.plugin.gcp.bigquery.models.UserDefinedFunction object
The type of user defined function. string
Values: "INLINE" "FROM_URI"
Type of UserDefinedFunction string

If type is UserDefinedFunction.Type.INLINE, this method returns a code blob. If type is UserDefinedFunction.Type.FROM_URI, the method returns a Google Cloud Storage URI (e.g. gs://bucket/path).

io.kestra.plugin.gcp.bigquery.models.ViewDefinition object
query string
io.kestra.plugin.gcp.cli.GCloudCLI object
Examples

Create a cluster then list them using a service account.

id: gcp_g_cloud_cli
namespace: company.team

tasks:
  - id: g_cloud_cli
    type: io.kestra.plugin.gcp.cli.GCloudCLI
    projectId: my-gcp-project
    serviceAccount: "{{ secret('gcp-sa') }}"
    commands:
      - gcloud container clusters create simple-cluster --region=europe-west3
      - gcloud container clusters list

Create a GCS bucket.

id: gcp_g_cloud_cli
namespace: company.team

tasks:
  - id: g_cloud_cli
    type: io.kestra.plugin.gcp.cli.GCloudCLI
    projectId: my-gcp-project
    serviceAccount: "{{ secret('gcp-sa') }}"
    commands:
      - gcloud storage buckets create gs://my-bucket

Output the result of a command.

id: gcp_g_cloud_cli
namespace: company.team

tasks:
  - id: g_cloud_cli
    type: io.kestra.plugin.gcp.cli.GCloudCLI
    projectId: my-gcp-project
    serviceAccount: "{{ secret('gcp-sa') }}"
    commands:
      # Outputs as a flow output for UI display
      - gcloud pubsub topics list --format=json | tr -d '\n' | xargs -0 -I {} echo '::{"outputs":{"gcloud":{}}}::'

      # Outputs as a file, preferred way for large payloads
      - gcloud storage ls --json > storage.json

commands string[] required
minItems=1
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.cli.GCloudCLI" required
Constant: "io.kestra.plugin.gcp.cli.GCloudCLI"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : google/cloud-sdk

Default: "google/cloud-sdk"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
env Record<string, string>
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

projectId string
serviceAccount string
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-ExecutionConfiguration object
kmsKey string
networkTags string[]
networkUri string
serviceAccountEmail string
subnetworkUri string
io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-PeripheralsConfiguration object
metastoreService string

Example: projects/[project_id]/locations/[region]/services/[service_id]

sparkHistoryServer
All of: io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-SparkHistoryServerConfiguration object, Resource name of an existing Dataproc Metastore service.
io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-RuntimeConfiguration object
containerImage string

If not specified, a default container image will be used.

properties object
version string
io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-SparkHistoryServerConfiguration object
dataprocCluster string

Example: projects/[project_id]/regions/[region]/clusters/[cluster_name]

io.kestra.plugin.gcp.dataproc.batches.PySparkSubmit object
Examples
id: gcp_dataproc_py_spark_submit
namespace: company.team
tasks:
  - id: py_spark_submit
    type: io.kestra.plugin.gcp.dataproc.batches.PySparkSubmit
    mainPythonFileUri: 'gs://spark-jobs-kestra/pi.py'
    name: test-pyspark
    region: europe-west3

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
mainPythonFileUri string required

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

name string required
region string required
type const: "io.kestra.plugin.gcp.dataproc.batches.PySparkSubmit" required
Constant: "io.kestra.plugin.gcp.dataproc.batches.PySparkSubmit"
allowFailure boolean

Default value is : false

Default: false
archiveUris string[]

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

args string[]

Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.

description string
disabled boolean

Default value is : false

Default: false
execution
All of: io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-ExecutionConfiguration object, Execution configuration for a workload.
fileUris string[]

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

jarFileUris string[]

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
peripherals
All of: io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-PeripheralsConfiguration object, Peripherals configuration for a workload.
projectId string
runtime
All of: io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-RuntimeConfiguration object, Runtime configuration for a workload.
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.dataproc.batches.RSparkSubmit object
Examples
id: gcp_dataproc_r_spark_submit
namespace: company.team
tasks:
  - id: r_spark_submit
    type: io.kestra.plugin.gcp.dataproc.batches.RSparkSubmit
    mainRFileUri: 'gs://spark-jobs-kestra/dataframe.r'
    name: test-rspark
    region: europe-west3

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
mainRFileUri string required

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

name string required
region string required
type const: "io.kestra.plugin.gcp.dataproc.batches.RSparkSubmit" required
Constant: "io.kestra.plugin.gcp.dataproc.batches.RSparkSubmit"
allowFailure boolean

Default value is : false

Default: false
archiveUris string[]

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

args string[]

Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.

description string
disabled boolean

Default value is : false

Default: false
execution
All of: io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-ExecutionConfiguration object, Execution configuration for a workload.
fileUris string[]

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

jarFileUris string[]

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
peripherals
All of: io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-PeripheralsConfiguration object, Peripherals configuration for a workload.
projectId string
runtime
All of: io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-RuntimeConfiguration object, Runtime configuration for a workload.
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.dataproc.batches.SparkSqlSubmit object
Examples
id: gcp_dataproc_spark_sql_submit
namespace: company.team
tasks:
  - id: spark_sql_submit
    type: io.kestra.plugin.gcp.dataproc.batches.SparkSqlSubmit
    queryFileUri: 'gs://spark-jobs-kestra/foobar.py'
    name: test-sparksql
    region: europe-west3

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
queryFileUri string required

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

region string required
type const: "io.kestra.plugin.gcp.dataproc.batches.SparkSqlSubmit" required
Constant: "io.kestra.plugin.gcp.dataproc.batches.SparkSqlSubmit"
allowFailure boolean

Default value is : false

Default: false
archiveUris string[]

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

args string[]

Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.

description string
disabled boolean

Default value is : false

Default: false
execution
All of: io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-ExecutionConfiguration object, Execution configuration for a workload.
fileUris string[]

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

jarFileUris string[]

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
peripherals
All of: io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-PeripheralsConfiguration object, Peripherals configuration for a workload.
projectId string
runtime
All of: io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-RuntimeConfiguration object, Runtime configuration for a workload.
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.dataproc.batches.SparkSubmit object
Examples
id: gcp_dataproc_spark_submit
namespace: company.team
tasks:
  - id: spark_submit
    type: io.kestra.plugin.gcp.dataproc.batches.SparkSubmit
    jarFileUris:
      - 'gs://spark-jobs-kestra/spark-examples.jar'
    mainClass: org.apache.spark.examples.SparkPi
    args:
      - 1000
    name: test-spark
    region: europe-west3

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
mainClass string required

The jar file that contains the class must be in the classpath or specified in jarFileUris

name string required
region string required
type const: "io.kestra.plugin.gcp.dataproc.batches.SparkSubmit" required
Constant: "io.kestra.plugin.gcp.dataproc.batches.SparkSubmit"
allowFailure boolean

Default value is : false

Default: false
archiveUris string[]

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

args string[]

Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.

description string
disabled boolean

Default value is : false

Default: false
execution
All of: io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-ExecutionConfiguration object, Execution configuration for a workload.
fileUris string[]

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

jarFileUris string[]

Hadoop Compatible File System (HCFS) URIs should be accessible from the cluster. Can be a GCS file with the gs:// prefix, an HDFS file on the cluster with the hdfs:// prefix, or a local file on the cluster with the file:// prefix

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
peripherals
All of: io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-PeripheralsConfiguration object, Peripherals configuration for a workload.
projectId string
runtime
All of: io.kestra.plugin.gcp.dataproc.batches.AbstractBatch-RuntimeConfiguration object, Runtime configuration for a workload.
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.dataproc.clusters.Create object
Examples

Creates a cluster in Google Cloud Dataproc.

id: gcp_dataproc_cluster_create
namespace: company.team

tasks:
  - id: cluster_create
    type: io.kestra.plugin.gcp.dataproc.clusters.Create
    clusterName: YOUR_CLUSTER_NAME
    region: YOUR_REGION
    zone: YOUR_ZONE
    masterMachineType: n1-standard-2
    workerMachineType: n1-standard-2
    workers: 2
    bucket: YOUR_BUCKET_NAME

Creates a cluster in Google Cloud Dataproc with specific disk size.

id: gcp_dataproc_cluster_create
namespace: company.team

tasks:
  - id: create_cluster_with_certain_disk_size
    type: io.kestra.plugin.gcp.dataproc.clusters.Create
    clusterName: YOUR_CLUSTER_NAME
    region: YOUR_REGION
    zone: YOUR_ZONE
    masterMachineType: n1-standard-2
    masterDiskSizeGB: 500
    workerMachineType: n1-standard-2
    workerDiskSizeGB: 200
    workers: 2
    bucket: YOUR_BUCKET_NAME

clusterName string required
minLength=1
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
region string required
minLength=1
type const: "io.kestra.plugin.gcp.dataproc.clusters.Create" required
Constant: "io.kestra.plugin.gcp.dataproc.clusters.Create"
allowFailure boolean

Default value is : false

Default: false
bucket string
description string
disabled boolean

Default value is : false

Default: false
imageVersion string

The Compute Engine image resource used for cluster instances.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
masterDiskSizeGB integer
masterMachineType string
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerDiskSizeGB integer
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
workerMachineType string
workers integer
zone string
io.kestra.plugin.gcp.dataproc.clusters.Delete object
Examples

Deletes a cluster from Google Cloud Dataproc.

id: gcp_dataproc_cluster_delete
namespace: company.team

tasks:
  - id: delete_cluster
    type: io.kestra.plugin.gcp.dataproc.clusters.Delete
    clusterName: YOUR_CLUSTER_NAME
    region: YOUR_REGION

clusterName string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
region string required
minLength=1
type const: "io.kestra.plugin.gcp.dataproc.clusters.Delete" required
Constant: "io.kestra.plugin.gcp.dataproc.clusters.Delete"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.firestore.Delete object
Examples
id: gcp_firestore_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.gcp.firestore.Delete
    collection: "persons"
    childPath: "1"

childPath string required

The Firestore document child path.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.firestore.Delete" required
Constant: "io.kestra.plugin.gcp.firestore.Delete"
allowFailure boolean

Default value is : false

Default: false
collection string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.firestore.Get object
Examples

Get a document from its path.

id: gcp_firestore_get
namespace: company.team

tasks:
  - id: get
    type: io.kestra.plugin.gcp.firestore.Get
    collection: "persons"
    childPath: "1"

childPath string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.firestore.Get" required
Constant: "io.kestra.plugin.gcp.firestore.Get"
allowFailure boolean

Default value is : false

Default: false
collection string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.firestore.Query object
Examples
id: gcp_firestore_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.gcp.firestore.Query
    collection: "persons"
    filters:
      - field: "lastname"
        value: "Doe"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.firestore.Query" required
Constant: "io.kestra.plugin.gcp.firestore.Query"
allowFailure boolean

Default value is : false

Default: false
collection string
description string
disabled boolean

Default value is : false

Default: false
fetchType string

FETCH_ONE output the first row, FETCH output all the rows, STORE store all rows in a file, NONE do nothing.

Default value is : STORE

Default: "STORE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
filters array
limit integer
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
offset integer
orderBy string
orderDirection string

Default value is : ASCENDING

Default: "ASCENDING"
Values: "ASCENDING" "DESCENDING"
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.firestore.Query-Filter object
field string required
value string required

Field value for the filter. Only strings are supported at the moment.

operator string

Default value is : EQUAL_TO

Default: "EQUAL_TO"
Values: "EQUAL_TO" "NOT_EQUAL_TO" "LESS_THAN" "LESS_THAN_OR_EQUAL_TO" "GREATER_THAN" "GREATER_THAN_OR_EQUAL_TO"
io.kestra.plugin.gcp.firestore.Set object
Examples

Set a document from a map.

id: gcp_firestore_set
namespace: company.team

tasks:
  - id: set
    type: io.kestra.plugin.gcp.firestore.Set
    collection: "persons"
    document:
      firstname: "John"
      lastname: "Doe"

Set a document from a JSON string.

id: gcp_firestore_set
namespace: company.team

inputs:
  - id: json_string
    type: STRING
    default: '{"firstname": "John", "lastname": "Doe"}'

tasks:
  - id: set
    type: io.kestra.plugin.gcp.firestore.Set
    collection: "persons"
    document: "{{ inputs.json_string }}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.firestore.Set" required
Constant: "io.kestra.plugin.gcp.firestore.Set"
allowFailure boolean

Default value is : false

Default: false
childPath string
collection string
description string
disabled boolean

Default value is : false

Default: false
document string | object

Can be a JSON string, or a map.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.Compose object
Examples

Concat files in a bucket

id: gcp_gcs_compose
namespace: company.team

tasks:
  - id: compose
    type: io.kestra.plugin.gcp.gcs.Compose
    list:
      from: "gs://my_bucket/dir/"
    to: "gs://my_bucket/destination/my-compose-file.txt"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
list required
All of: io.kestra.plugin.gcp.gcs.Compose-List object, The directory to list
type const: "io.kestra.plugin.gcp.gcs.Compose" required
Constant: "io.kestra.plugin.gcp.gcs.Compose"
allowEmpty boolean

Default value is : false

Default: false
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
to string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.Compose-List object
from string required
filter string

Default value is : BOTH

Default: "BOTH"
Values: "FILES" "DIRECTORY" "BOTH"
listingType string

If DIRECTORY, will only list objects in the specified directory. If RECURSIVE, will list objects in the specified directory recursively. Default value is DIRECTORY. When using the RECURSIVE value, be careful to move your files to a location not in the from scope.

Default value is : DIRECTORY

Default: "DIRECTORY"
Values: "RECURSIVE" "DIRECTORY"
regExp string

e.g. regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

io.kestra.plugin.gcp.gcs.Copy object

Copy a file between Internal Storage and Google Cloud Storage.

Examples

Move a file between bucket paths

id: gcp_gcs_copy
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: copy
    type: io.kestra.plugin.gcp.gcs.Copy
    from: "{{ inputs.file }}"
    delete: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.gcs.Copy" required
Constant: "io.kestra.plugin.gcp.gcs.Copy"
allowFailure boolean

Default value is : false

Default: false
delete boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
from string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
to string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.CreateBucket object
Examples

Create a new bucket with some options

id: gcp_gcs_create_bucket
namespace: company.team

tasks:
  - id: create_bucket
    type: io.kestra.plugin.gcp.gcs.CreateBucket
    name: "my-bucket"
    versioningEnabled: true
    labels:
      my-label: my-value

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
type const: "io.kestra.plugin.gcp.gcs.CreateBucket" required
Constant: "io.kestra.plugin.gcp.gcs.CreateBucket"
allowFailure boolean

Default value is : false

Default: false

The access control configuration to apply to bucket's blobs when no other configuration is specified. See About Access Control Lists

defaultEventBasedHold boolean
defaultKmsKeyName string
description string
disabled boolean

Default value is : false

Default: false
iamConfiguration
All of: io.kestra.plugin.gcp.gcs.models.IamConfiguration object, The Bucket's IAM Configuration
ifExists string

Default value is : ERROR

Default: "ERROR"
Values: "ERROR" "UPDATE" "SKIP"
indexPage string

Behaves as the bucket's directory index where missing blobs are treated as potential directories.

labels object

This configuration is expressed as a number of lifecycle rules, consisting of an action and a condition. See Object Lifecycle Management Only the age condition is supported. Only the delete and SetStorageClass actions are supported

location string

Data for blobs in the bucket resides in physical storage within this region. A list of supported values is available here.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
logging
All of: io.kestra.plugin.gcp.gcs.models.Logging object, The bucket's logging configuration
notFoundPage string
projectId string
requesterPays boolean

Whether a user accessing the bucket or an object it contains should assume the transit costs related to the access.

retentionPeriod integer

If policy is not locked this value can be cleared, increased, and decreased. If policy is locked the retention period can only be increased.

scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
storageClass string

This defines how blobs in the bucket are stored and determines the SLA and the cost of storage. A list of supported values is available here.

Values: "REGIONAL" "MULTI_REGIONAL" "NEARLINE" "COLDLINE" "STANDARD" "ARCHIVE" "DURABLE_REDUCED_AVAILABILITY"
timeout string
format=duration
versioningEnabled boolean

When set to true, versioning is fully enabled.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.CreateBucketIamPolicy object
Examples

Add role to a service account on a bucket

id: gcp_gcs_create_bucket_iam_policy
namespace: company.team

tasks:
  - id: create_bucket_iam_policy
    type: io.kestra.plugin.gcp.gcs.CreateBucketIamPolicy
    name: "my-bucket"
    member: "[email protected]"
    role: "roles/storage.admin"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
member string required
name string required
role string required
type const: "io.kestra.plugin.gcp.gcs.CreateBucketIamPolicy" required
Constant: "io.kestra.plugin.gcp.gcs.CreateBucketIamPolicy"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
ifExists string

Default value is : SKIP

Default: "SKIP"
Values: "ERROR" "SKIP"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.Delete object
Examples
id: gcp_gcs_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.gcp.gcs.Delete
    uri: "gs://my_bucket/dir/file.csv"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.gcs.Delete" required
Constant: "io.kestra.plugin.gcp.gcs.Delete"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
errorOnMissing boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
uri string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.DeleteBucket object
Examples

Delete a bucket

id: gcp_gcs_delete_bucket
namespace: company.team

tasks:
  - id: delete_bucket
    type: io.kestra.plugin.gcp.gcs.DeleteBucket
    name: "my-bucket"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
type const: "io.kestra.plugin.gcp.gcs.DeleteBucket" required
Constant: "io.kestra.plugin.gcp.gcs.DeleteBucket"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.DeleteList object
Examples
id: gcp_gcs_delete_list
namespace: company.team

tasks:
  - id: delete_list
    type: io.kestra.plugin.gcp.gcs.DeleteList
    from: "gs://my_bucket/dir/"

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.gcs.DeleteList" required
Constant: "io.kestra.plugin.gcp.gcs.DeleteList"
allVersions boolean
allowFailure boolean

Default value is : false

Default: false
concurrent integer
min=2
description string
disabled boolean

Default value is : false

Default: false
errorOnEmpty boolean

Default value is : false

Default: false
listingType string

If DIRECTORY, will only list objects in the specified directory. If RECURSIVE, will list objects in the specified directory recursively. Default value is DIRECTORY. When using the RECURSIVE value, be careful to move your files to a location not in the from scope.

Default value is : DIRECTORY

Default: "DIRECTORY"
Values: "RECURSIVE" "DIRECTORY"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
regExp string

e.g. regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.Download object
Examples
id: gcp_gcs_download
namespace: company.team

tasks:
  - id: download
    type: io.kestra.plugin.gcp.gcs.Download
    from: "gs://my_bucket/dir/file.csv"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.gcs.Download" required
Constant: "io.kestra.plugin.gcp.gcs.Download"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
from string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.Downloads object
Examples

Download a list of files and move it to an archive folders

id: gcp_gcs_downloads
namespace: company.team

tasks:
  - id: downloads
    type: io.kestra.plugin.gcp.gcs.Downloads
    from: gs://my-bucket/kestra/files/
    action: MOVE
    moveDirectory: gs://my-bucket/kestra/archive/

action string required
Values: "MOVE" "DELETE" "NONE"
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.gcs.Downloads" required
Constant: "io.kestra.plugin.gcp.gcs.Downloads"
allVersions boolean
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
listingType string

If DIRECTORY, will only list objects in the specified directory. If RECURSIVE, will list objects in the specified directory recursively. Default value is DIRECTORY. When using the RECURSIVE value, be careful to move your files to a location not in the from scope.

Default value is : DIRECTORY

Default: "DIRECTORY"
Values: "RECURSIVE" "DIRECTORY"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
moveDirectory string
projectId string
regExp string

e.g. regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.List object
Examples

List files in a bucket

id: gcp_gcs_list
namespace: company.team

tasks:
  - id: list
    type: io.kestra.plugin.gcp.gcs.List
    from: "gs://my_bucket/dir/"

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.gcs.List" required
Constant: "io.kestra.plugin.gcp.gcs.List"
allVersions boolean
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
filter string

Default value is : BOTH

Default: "BOTH"
Values: "FILES" "DIRECTORY" "BOTH"
listingType string

If DIRECTORY, will only list objects in the specified directory. If RECURSIVE, will list objects in the specified directory recursively. Default value is DIRECTORY. When using the RECURSIVE value, be careful to move your files to a location not in the from scope.

Default value is : DIRECTORY

Default: "DIRECTORY"
Values: "RECURSIVE" "DIRECTORY"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
regExp string

e.g. regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.Trigger object

This trigger polls a GCS bucket at every interval. You can search for all files in a bucket or directory in from, or you can filter the files with a regExp. The detection is atomic: internally we do a list and interact only with the files listed. Once a file is detected, we download it to internal storage and process it with the declared action, in order to move or delete the files from the bucket (to avoid double detection on the next poll).

Examples

Wait for a list of files on a GCS bucket, and iterate through the files.

id: gcs-listen
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value }}"
    value: "{{ trigger.blobs | jq('.[].uri') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.gcp.gcs.Trigger
    interval: "PT5M"
    from: gs://my-bucket/kestra/listen/
    action: MOVE
    moveDirectory: gs://my-bucket/kestra/archive/

Wait for a list of files on a GCS bucket and iterate through the files. Delete files manually after processing to prevent infinite triggering.

id: gcs-listen
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value }}"
      - id: delete
        type: io.kestra.plugin.gcp.gcs.Delete
        uri: "{{ taskrun.value }}"
    value: "{{ trigger.blobs | jq('.[].uri') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.gcp.gcs.Trigger
    interval: "PT5M"
    from: gs://my-bucket/kestra/listen/
    action: NONE
action string required
Values: "MOVE" "DELETE" "NONE"
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.gcs.Trigger" required
Constant: "io.kestra.plugin.gcp.gcs.Trigger"
conditions array
description string
disabled boolean

Default value is : false

Default: false
interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
listingType string

If DIRECTORY, will only list objects in the specified directory. If RECURSIVE, will list objects in the specified directory recursively. Default value is DIRECTORY. When using the RECURSIVE value, be careful to move your files to a location not in the from scope.

Default value is : DIRECTORY

Default: "DIRECTORY"
Values: "RECURSIVE" "DIRECTORY"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
moveDirectory string
projectId string
regExp string

e.g. regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
stopAfter string[]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.UpdateBucket object
Examples

Update some bucket labels

id: gcp_gcs_update_bucket
namespace: company.team

tasks:
  - id: update_bucket
    type: io.kestra.plugin.gcp.gcs.UpdateBucket
    name: "my-bucket"
    labels:
      my-label: my-value

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
type const: "io.kestra.plugin.gcp.gcs.UpdateBucket" required
Constant: "io.kestra.plugin.gcp.gcs.UpdateBucket"
allowFailure boolean

Default value is : false

Default: false

The access control configuration to apply to bucket's blobs when no other configuration is specified. See About Access Control Lists

defaultEventBasedHold boolean
defaultKmsKeyName string
description string
disabled boolean

Default value is : false

Default: false
iamConfiguration
All of: io.kestra.plugin.gcp.gcs.models.IamConfiguration object, The Bucket's IAM Configuration
indexPage string

Behaves as the bucket's directory index where missing blobs are treated as potential directories.

labels object

This configuration is expressed as a number of lifecycle rules, consisting of an action and a condition. See Object Lifecycle Management Only the age condition is supported. Only the delete and SetStorageClass actions are supported

location string

Data for blobs in the bucket resides in physical storage within this region. A list of supported values is available here.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
logging
All of: io.kestra.plugin.gcp.gcs.models.Logging object, The bucket's logging configuration
notFoundPage string
projectId string
requesterPays boolean

Whether a user accessing the bucket or an object it contains should assume the transit costs related to the access.

retentionPeriod integer

If policy is not locked this value can be cleared, increased, and decreased. If policy is locked the retention period can only be increased.

scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
storageClass string

This defines how blobs in the bucket are stored and determines the SLA and the cost of storage. A list of supported values is available here.

Values: "REGIONAL" "MULTI_REGIONAL" "NEARLINE" "COLDLINE" "STANDARD" "ARCHIVE" "DURABLE_REDUCED_AVAILABILITY"
timeout string
format=duration
versioningEnabled boolean

When set to true, versioning is fully enabled.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.Upload object
Examples
id: gcp_gcs_upload
namespace: company.team

tasks:
  - id: upload
    type: io.kestra.plugin.gcp.gcs.Upload
    from: "{{ inputs.file }}"
    to: "gs://my_bucket/dir/file.csv"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.gcs.Upload" required
Constant: "io.kestra.plugin.gcp.gcs.Upload"
allowFailure boolean

Default value is : false

Default: false
cacheControl string
contentDisposition string
contentEncoding string
contentType string
description string
disabled boolean

Default value is : false

Default: false
from string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
to string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.gcs.models.AccessControl object
entity required
All of: io.kestra.plugin.gcp.gcs.models.Entity object, The entity
role string required
Values: "READER" "WRITER" "OWNER"
io.kestra.plugin.gcp.gcs.models.BucketLifecycleRule object
action required
All of: io.kestra.plugin.gcp.gcs.models.BucketLifecycleRule-Action object, The action to take when a lifecycle condition is met
condition required
io.kestra.plugin.gcp.gcs.models.BucketLifecycleRule-Action object
type string required
Values: "DELETE" "SET_STORAGE_CLASS"
value string
io.kestra.plugin.gcp.gcs.models.BucketLifecycleRule-Condition object
age integer required
io.kestra.plugin.gcp.gcs.models.Cors object
maxAgeSeconds integer
methods string[]
responseHeaders string[]
io.kestra.plugin.gcp.gcs.models.Entity object
type string required
Values: "DOMAIN" "GROUP" "USER"
value string required
io.kestra.plugin.gcp.gcs.models.IamConfiguration object
publicAccessPrevention string
Values: "ENFORCED" "UNSPECIFIED" "UNKNOWN" "INHERITED"
uniformBucketLevelAccessEnabled boolean
io.kestra.plugin.gcp.gcs.models.Logging object
logBucket string
logObjectPrefix string
io.kestra.plugin.gcp.gke.ClusterMetadata object
Examples

Fetch a GKE cluster's metadata.

id: gcp_gke_cluster_metadata
namespace: company.team

tasks:
  - id: cluster_metadata
    type: io.kestra.plugin.gcp.gke.ClusterMetadata
    clusterProjectId: my-project-id
    clusterZone: europe-west1-c
    clusterId: my-cluster-id

clusterId string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.gcp.gke.ClusterMetadata" required
Constant: "io.kestra.plugin.gcp.gke.ClusterMetadata"
allowFailure boolean

Default value is : false

Default: false
clusterProjectId string
clusterZone string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.pubsub.Consume object

Requires a maxDuration or a maxRecords.

Examples

id: gcp_pubsub_consume
namespace: company.team

tasks:
  - id: consume
    type: io.kestra.plugin.gcp.pubsub.Consume
    topic: topic-test
    maxRecords: 10
    projectId: "{{ secret('GCP_PROJECT_ID') }}"
    subscription: my-subscription

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
subscription string required

The Pub/Sub subscription. It will be created automatically if it didn't exist and 'autoCreateSubscription' is enabled.

topic string required

The Pub/Sub topic. It must be created before executing the task.

type const: "io.kestra.plugin.gcp.pubsub.Consume" required
Constant: "io.kestra.plugin.gcp.pubsub.Consume"
allowFailure boolean

Default value is : false

Default: false
autoCreateSubscription boolean

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string
format=duration
maxRecords integer
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serdeType string

Default value is : STRING

Default: "STRING"
Values: "STRING" "JSON"
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.pubsub.Publish object
Examples
id: gcp_pubsub_publish
namespace: company.team

tasks:
  - id: publish
    type: io.kestra.plugin.gcp.pubsub.Publish
    topic: topic-test
    from:
      - data: "{{ 'base64-encoded-string-1' | base64encode }}"
        attributes:
          testAttribute: KestraTest
      - messageId: '1234'
      - orderingKey: 'foo'
      - data: "{{ 'base64-encoded-string-2' | base64encode }}"
      - attributes:
          testAttribute: KestraTest

Can be an internal storage URI, a list of Pub/Sub messages, or a single Pub/Sub message.

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
topic string required

The Pub/Sub topic. It must be created before executing the task.

type const: "io.kestra.plugin.gcp.pubsub.Publish" required
Constant: "io.kestra.plugin.gcp.pubsub.Publish"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serdeType string

Default value is : STRING

Default: "STRING"
Values: "STRING" "JSON"
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.pubsub.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.gcp.pubsub.Trigger instead.

Examples

Consume a message from a Pub/Sub topic in real-time.

id: realtime-pubsub
namespace: company.team

tasks:
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: "Received: {{ trigger.data }}"

triggers:
  - id: trigger
    type: io.kestra.plugin.gcp.pubsub.RealtimeTrigger
    projectId: test-project-id
    topic: test-topic
    subscription: test-subscription

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
topic string required

The Pub/Sub topic. It must be created before executing the task.

type const: "io.kestra.plugin.gcp.pubsub.RealtimeTrigger" required
Constant: "io.kestra.plugin.gcp.pubsub.RealtimeTrigger"
autoCreateSubscription boolean

Default value is : true

Default: true
conditions array
description string
disabled boolean

Default value is : false

Default: false
interval string

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string
format=duration
maxRecords integer
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serdeType string

Default value is : STRING

Default: "STRING"
Values: "STRING" "JSON"
serviceAccount string
stopAfter string[]
subscription string

The Pub/Sub subscription. It will be created automatically if it didn't exist and 'autoCreateSubscription' is enabled.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.pubsub.Trigger object

If you would like to consume each message from a Pub/Sub topic in real-time and create one execution per message, you can use the io.kestra.plugin.gcp.pubsub.RealtimeTrigger instead.

Examples

id: gcp_trigger
namespace: company.team

tasks:
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: "Received: {{ trigger.data }}"

triggers:
  - id: trigger
    type: io.kestra.plugin.gcp.pubsub.Trigger
    projectId: test-project-id
    subscription: test-subscription
    topic: test-topic
    maxRecords: 10

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
topic string required

The Pub/Sub topic. It must be created before executing the task.

type const: "io.kestra.plugin.gcp.pubsub.Trigger" required
Constant: "io.kestra.plugin.gcp.pubsub.Trigger"
autoCreateSubscription boolean

Default value is : true

Default: true
conditions array
description string
disabled boolean

Default value is : false

Default: false
interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string
format=duration
maxRecords integer
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serdeType string

Default value is : STRING

Default: "STRING"
Values: "STRING" "JSON"
serviceAccount string
stopAfter string[]
subscription string

The Pub/Sub subscription. It will be created automatically if it didn't exist and 'autoCreateSubscription' is enabled.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.pubsub.model.Message object
attributes object
data

If it's a string, it can be a dynamic property otherwise not.

messageId string
orderingKey string
io.kestra.plugin.gcp.vertexai.AbstractGenerativeAi-ModelParameter object
maxOutputTokens integer

Specify a lower value for shorter responses and a higher value for longer responses. A token may be smaller than a word. A token is approximately four characters. 100 tokens correspond to roughly 60-80 words.

Default value is : 128

Default: 128
min=1
max=1024
temperature number

Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that require a more deterministic and less open-ended or creative response, while higher temperatures can lead to more diverse or creative results. A temperature of 0 is deterministic: the highest probability response is always selected. For most use cases, try starting with a temperature of 0.2.

Default value is : 0.2

Default: 0.2
max=1
exclusiveMin=0
topK integer

A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature). For each token selection step, the top K tokens with the highest probabilities are sampled. Then tokens are further filtered based on topP with the final token selected using temperature sampling. Specify a lower value for less random responses and a higher value for more random responses.

Default value is : 40

Default: 40
min=1
max=40
topP number

Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value. For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-p value is 0.5, then the model will select either A or B as the next token (using temperature) and doesn't consider C. The default top-p value is 0.95. Specify a lower value for less random responses and a higher value for more random responses.

Default value is : 0.95

Default: 0.95
max=1
exclusiveMin=0
io.kestra.plugin.gcp.vertexai.ChatCompletion object

See Generative AI quickstart using the Vertex AI API for more information.

Examples

Chat completion using the Vertex AI Gemini API.

id: gcp_vertexai_chat_completion
namespace: company.team

tasks:
  - id: chat_completion
    type: io.kestra.plugin.gcp.vertexai.ChatCompletion
    region: us-central1
    projectId: my-project
    context: I love jokes that talk about sport
    messages:
      - author: user
        content: Please tell me a joke

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
messages array required

Messages appear in chronological order: oldest first, newest last. When the history of messages causes the input to exceed the maximum length, the oldest messages are removed until the entire prompt is within the allowed limit.

minItems=1
region string required
type const: "io.kestra.plugin.gcp.vertexai.ChatCompletion" required
Constant: "io.kestra.plugin.gcp.vertexai.ChatCompletion"
allowFailure boolean

Default value is : false

Default: false
context string
description string
disabled boolean

Default value is : false

Default: false
examples array
history array

Messages appear in chronological order: oldest first, newest last. When the history of messages causes the input to exceed the maximum length, the oldest messages are removed until the entire prompt is within the allowed limit.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
parameters
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.vertexai.ChatCompletion-Example object
input string required
output string required
io.kestra.plugin.gcp.vertexai.ChatCompletion-Message object
content string required
author string
io.kestra.plugin.gcp.vertexai.CustomJob object
Examples
id: gcp_vertexai_custom_job
namespace: company.team

tasks:
  - id: custom_job
    type: io.kestra.plugin.gcp.vertexai.CustomJob
    projectId: my-gcp-project
    region: europe-west1
    displayName: Start Custom Job
    spec:
      workerPoolSpecs:
      - containerSpec:
          imageUri: gcr.io/my-gcp-project/my-dir/my-image:latest
        machineSpec:
          machineType: n1-standard-4
        replicaCount: 1

displayName string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
region string required
spec required
All of: io.kestra.plugin.gcp.vertexai.models.CustomJobSpec object, The job specification.
type const: "io.kestra.plugin.gcp.vertexai.CustomJob" required
Constant: "io.kestra.plugin.gcp.vertexai.CustomJob"
allowFailure boolean

Default value is : false

Default: false
delete boolean

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
wait boolean

Allowing to capture job status & logs.

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.vertexai.MultimodalCompletion object

See Overview of multimodal models for more information.

Examples

Text completion using the Vertex Gemini API

id: gcp_vertexai_multimodal_completion
namespace: company.team

tasks:
  - id: multimodal_completion
    type: io.kestra.plugin.gcp.vertexai.MultimodalCompletion
    region: us-central1
    projectId: my-project
    contents:
      - content: Please tell me a joke

Multimodal completion using the Vertex Gemini API

id: gcp_vertexai_multimodal_completion
namespace: company.team

inputs:
  - id: image
    type: FILE

tasks:
  - id: multimodal_completion
    type: io.kestra.plugin.gcp.vertexai.MultimodalCompletion
    region: us-central1
    projectId: my-project
    contents:
      - content: Can you describe this image?
      - mimeType: image/jpeg
        content: "{{ inputs.image }}"

contents array required
minItems=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
region string required
type const: "io.kestra.plugin.gcp.vertexai.MultimodalCompletion" required
Constant: "io.kestra.plugin.gcp.vertexai.MultimodalCompletion"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
parameters
projectId string
scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.vertexai.MultimodalCompletion-Content object
content string required

If the content is not text, the mimeType property must be set.

mimeType string
io.kestra.plugin.gcp.vertexai.TextCompletion object

See Generative AI quickstart using the Vertex AI API for more information.

Examples

Text completion using the Vertex AI Gemini API.

id: gcp_vertexai_text_completion
namespace: company.team

tasks:
  - id: text_completion
    type: io.kestra.plugin.gcp.vertexai.TextCompletion
    region: us-central1
    projectId: my-project
    prompt: Please tell me a joke

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
region string required
type const: "io.kestra.plugin.gcp.vertexai.TextCompletion" required
Constant: "io.kestra.plugin.gcp.vertexai.TextCompletion"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
parameters
projectId string
prompt string

Prompts can include preamble, questions, suggestions, instructions, or examples.

scopes string[]

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default value is : - https://www.googleapis.com/auth/cloud-platform

Default:
[
  "https://www.googleapis.com/auth/cloud-platform"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.gcp.vertexai.models.ContainerSpec object
imageUri string required

Must be on google container registry, example: gcr.io/{{ project }}/{{ dir }}/{{ image }}:{{ tag }}

args string[]
commands string[]

It overrides the entrypoint instruction in Dockerfile when provided.

env object

Maximum limit is 100.

io.kestra.plugin.gcp.vertexai.models.CustomJobSpec object
workerPoolSpecs array required

All worker pools except the first one are optional and can be skipped

minItems=1
baseOutputDirectory
All of: io.kestra.plugin.gcp.vertexai.models.GcsDestination object, The Cloud Storage location to store the output of this job.
enableWebAccess boolean
network string

For example, projects/12345/global/networks/myVPC. Format is of the form projects/{project}/global/networks/{network}. Where {project} is a project number, as in 12345, and {network} is a network name. To specify this field, you must have already configured VPC Network Peering for Vertex AI. If this field is left unspecified, the job is not peered with any network.

scheduling
All of: io.kestra.plugin.gcp.vertexai.models.Scheduling object, Scheduling options for a CustomJob.
serviceAccount string
Users submitting jobs must have act-as permission on this run-as account. If unspecified, the [Vertex AI Custom Code Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project is used.
tensorboard string

Will upload Tensorboard logs. Format: projects/{project}/locations/{location}/tensorboards/{tensorboard}

io.kestra.plugin.gcp.vertexai.models.DiscSpec object
bootDiskSizeGb integer

Default value is : 100

Default: 100
bootDiskType string

Default value is : PD_SSD

Default: "PD_SSD"
Values: "PD_SSD" "PD_STANDARD"
io.kestra.plugin.gcp.vertexai.models.GcsDestination object
outputUriPrefix string required

If the uri doesn't end with '/', a '/' will be automatically appended. The directory is created if it doesn't exist.

io.kestra.plugin.gcp.vertexai.models.MachineSpec object
acceleratorCount integer
acceleratorType string
Values: "ACCELERATOR_TYPE_UNSPECIFIED" "NVIDIA_TESLA_K80" "NVIDIA_TESLA_P100" "NVIDIA_TESLA_V100" "NVIDIA_TESLA_P4" "NVIDIA_TESLA_T4" "NVIDIA_TESLA_A100" "NVIDIA_A100_80GB" "NVIDIA_L4" "NVIDIA_H100_80GB" "TPU_V2" "TPU_V3" "TPU_V4_POD" "TPU_V5_LITEPOD" "UNRECOGNIZED"
io.kestra.plugin.gcp.vertexai.models.PythonPackageSpec object
args string[] required

The maximum number of package URIs is 100.

envs object required

Maximum limit is 100.

packageUris string[] required

The maximum number of package URIs is 100.

io.kestra.plugin.gcp.vertexai.models.Scheduling object
restartJobOnWorkerRestart boolean required

This feature can be used by distributed training jobs that are not resilient to workers leaving and joining a job.

timeOut string required
format=duration
io.kestra.plugin.gcp.vertexai.models.WorkerPoolSpec object
containerSpec required
All of: io.kestra.plugin.gcp.vertexai.models.ContainerSpec object, The custom container task.
machineSpec required
All of: io.kestra.plugin.gcp.vertexai.models.MachineSpec object, The specification of a single machine.
discSpec
All of: io.kestra.plugin.gcp.vertexai.models.DiscSpec object, The specification of the disk.
pythonPackageSpec
All of: io.kestra.plugin.gcp.vertexai.models.PythonPackageSpec object, The python package specs.
replicaCount integer
io.kestra.plugin.git.Clone object
Examples

Clone a public GitHub repository.

id: git_clone
namespace: company.team

tasks:
  - id: clone
    type: io.kestra.plugin.git.Clone
    url: https://github.com/dbt-labs/jaffle_shop
    branch: main

Clone a private repository from an HTTP server such as a private GitHub repository using a personal access token.

id: git_clone
namespace: company.team

tasks:
  - id: clone
    type: io.kestra.plugin.git.Clone
    url: https://github.com/kestra-io/examples
    branch: main
    username: git_username
    password: your_personal_access_token

Clone a repository from an SSH server. If you want to clone the repository into a specific directory, you can configure the directory property as shown below.

id: git_clone
namespace: company.team

tasks:
  - id: clone
    type: io.kestra.plugin.git.Clone
    url: git@github.com:kestra-io/kestra.git
    directory: kestra
    privateKey: <keyfile_content>
    passphrase: <passphrase>

Clone a GitHub repository and run a Python ETL script. Note that the Worker task is required so that the Python script shares the same local file system with files cloned from GitHub in the previous task.

id: git_python
namespace: company.team

tasks:
  - id: file_system
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/examples
        branch: main
      - id: python_etl
        type: io.kestra.plugin.scripts.python.Commands
        beforeCommands:
          - pip install requests pandas > /dev/null
        commands:
          - python examples/scripts/etl_script.py

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.git.Clone" required
Constant: "io.kestra.plugin.git.Clone"
url string required
allowFailure boolean

Default value is : false

Default: false
branch string
cloneSubmodules boolean
depth integer

Default value is : 1

Default: 1
min=1
description string
directory string

If the directory isn't set, the current directory will be used.

disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passphrase string
password string
privateKey string

To generate an ECDSA PEM format key from OpenSSH, use the following command: ssh-keygen -t ecdsa -b 256 -m PEM. You can then set this property with your private key content and put your public key on Git.

timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.git.Push object

Replaced by PushFlows and PushNamespaceFiles for flow and namespace files push scenario. You can add inputFiles to be committed and pushed. Furthermore, you can use this task in combination with the Clone task so that you can first clone the repository, then add or modify files and push to Git afterwards. Check the examples below as well as the Version Control with Git documentation for more information.

Examples

Push flows and namespace files to a Git repository every 15 minutes.

id: push_to_git
namespace: company.team

tasks:
  - id: commit_and_push
    type: io.kestra.plugin.git.Push
    namespaceFiles:
      enabled: true
    flows:
      enabled: true
    url: https://github.com/kestra-io/scripts
    branch: kestra
    username: git_username
    password: "{{ secret('GITHUB_ACCESS_TOKEN') }}"
    commitMessage: "add flows and scripts {{ now() }}"

triggers:
  - id: schedule_push
    type: io.kestra.plugin.core.trigger.Schedule
    cron: "*/15 * * * *"

Clone the main branch, generate a file in a script, and then push that new file to Git. Since we're in a working directory with a .git directory, you don't need to specify the URL in the Push task. However, the Git credentials always need to be explicitly provided on both Clone and Push tasks (unless using task defaults).

id: push_new_file_to_git
namespace: company.team

inputs:
  - id: commit_message
    type: STRING
    defaults: add a new file to Git

tasks:
  - id: wdir
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone
        type: io.kestra.plugin.git.Clone
        branch: main
        url: https://github.com/kestra-io/scripts
      - id: generate_data
        type: io.kestra.plugin.scripts.python.Commands
        docker:
          image: ghcr.io/kestra-io/pydata:latest
        commands:
          - python generate_data/generate_orders.py
      - id: push
        type: io.kestra.plugin.git.Push
        username: git_username
        password: myPAT
        branch: feature_branch
        inputFiles:
          to_commit/avg_order.txt: "{{ outputs.generate_data.vars.average_order }}"
        addFilesPattern:
          - to_commit
        commitMessage: "{{ inputs.commit_message }}"

branch string required

If the branch doesn't exist yet, it will be created.

commitMessage string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.git.Push" required
Constant: "io.kestra.plugin.git.Push"
addFilesPattern string[]

A directory name (e.g. dir to add dir/file1 and dir/file2) can also be given to add all files in the directory, recursively. File globs (e.g. *.py) are not yet supported.

Default value is : - .

Default value is : - .

Default:
[
  "."
]
allowFailure boolean

Default value is : false

Default: false
author
All of: io.kestra.plugin.git.Push-Author object, Commit author.
cloneSubmodules boolean
description string
directory string

If the directory isn't set, the current directory will be used.

disabled boolean

Default value is : false

Default: false
flows
All of: io.kestra.plugin.git.Push-FlowFiles object, Whether to push flows from the current namespace to Git.
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
passphrase string
password string
privateKey string

To generate an ECDSA PEM format key from OpenSSH, use the following command: ssh-keygen -t ecdsa -b 256 -m PEM. You can then set this property with your private key content and put your public key on Git.

timeout string
format=duration
url string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.git.Push-Author object
email string
name string
io.kestra.plugin.git.Push-FlowFiles object
childNamespaces boolean

Default value is : true

Default: true
enabled boolean

Default value is : true

Default: true
gitDirectory string

Default value is : _flows

Default: "_flows"
io.kestra.plugin.git.PushFlows object

Using this task, you can push one or more flows from a given namespace (and optionally also child namespaces) to Git. Check the examples below to see how you can push all flows or only specific ones. You can also learn about Git integration in the Version Control with Git documentation.

Examples

Automatically push all saved flows from the dev namespace and all child namespaces to a Git repository every day at 5 p.m. Before pushing to Git, the task will adjust the flow's source code to match the targetNamespace to prepare the Git branch for merging to the production namespace.

id: push_to_git
namespace: company.team

tasks:
  - id: commit_and_push
    type: io.kestra.plugin.git.PushFlows
    sourceNamespace: dev # the namespace from which flows are pushed
    targetNamespace: prod # the target production namespace; if different than sourceNamespace, the sourceNamespace in the source code will be overwritten by the targetNamespace
    flows: "*"  # optional list of glob patterns; by default, all flows are pushed
    includeChildNamespaces: true # optional boolean, false by default
    gitDirectory: _flows
    url: https://github.com/kestra-io/scripts # required string
    username: git_username # required string needed for Auth with Git
    password: "{{ secret('GITHUB_ACCESS_TOKEN') }}"
    branch: kestra # optional, uses "kestra" by default
    commitMessage: "add flows {{ now() }}" # optional string
    dryRun: true  # if true, you'll see what files will be added, modified or deleted based on the state in Git without overwriting the files yet

triggers:
  - id: schedule_push
    type: io.kestra.plugin.core.trigger.Schedule
    cron: "0 17 * * *" # release/push to Git every day at 5pm

Manually push a single flow to Git if the input push is set to true.

id: myflow
namespace: prod

inputs:
  - id: push
    type: BOOLEAN
    defaults: false

tasks:
  - id: if
    type: io.kestra.plugin.core.flow.If
    condition: "{{ inputs.push == true}}"
    then:
      - id: commit_and_push
        type: io.kestra.plugin.git.PushFlows
        sourceNamespace: prod # optional; if you prefer templating, you can use "{{ flow.namespace }}"
        targetNamespace: prod # optional; by default, set to the same namespace as defined in sourceNamespace
        flows: myflow # if you prefer templating, you can use "{{ flow.id }}"
        url: https://github.com/kestra-io/scripts
        username: git_username
        password: "{{ secret('GITHUB_ACCESS_TOKEN') }}"
        branch: kestra
        commitMessage: "add flow {{ flow.namespace ~ '.' ~ flow.id }}"

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.git.PushFlows" required
Constant: "io.kestra.plugin.git.PushFlows"
allowFailure boolean

Default value is : false

Default: false
authorEmail string

If null, no author will be set on this commit.

authorName string

If null, the username will be used instead.

Default value is : 'username'

Default: "`username`"
branch string

If the branch doesn't exist yet, it will be created.

Default value is : kestra

Default: "kestra"
cloneSubmodules boolean
commitMessage string

Default value is : Add flows from sourceNamespace namespace

Default: "Add flows from `sourceNamespace` namespace"
description string
disabled boolean

Default value is : false

Default: false
dryRun boolean

Default value is : false

Default: false
flows string | string[]

By default, all flows from the specified sourceNamespace will be pushed (and optionally adjusted to match the targetNamespace before pushing to Git). If you want to push only the current flow, you can use the "{{flow.id}}" expression or specify the flow ID explicitly, e.g. myflow. Given that this is a list of glob patterns, you can include as many flows as you wish, provided that the user is authorized to access that namespace. Note that each glob pattern tries to match the file name OR the relative path starting from gitDirectory

Default value is : '**'

Default: "**"
gitDirectory string

If not set, flows will be pushed to a Git directory named _flows and will optionally also include subdirectories named after the child namespaces. If you prefer, you can specify an arbitrary path, e.g., kestra/flows, allowing you to push flows to that specific Git directory. If the includeChildNamespaces property is set to true, this task will also push all flows from child namespaces into their corresponding nested directories, e.g., flows from the child namespace called prod.marketing will be added to the marketing folder within the _flows folder. Note that the targetNamespace (here prod) is specified in the flow code; therefore, kestra will not create the prod directory within _flows. You can use the PushFlows task to push flows from the sourceNamespace, and use SyncFlows to then sync PR-approved flows to the targetNamespace, including all child namespaces.

Default value is : _flows

Default: "_flows"
includeChildNamespaces boolean

By default, it’s false, so the task will push only flows from the explicitly declared namespace without pushing flows from child namespaces. If set to true, flows from child namespaces will be pushed to child directories in Git. See the example below for a practical explanation:

| Source namespace in the flow code | Git directory path | Synced to target namespace |
|---|---|---|
| namespace: dev | _flows/flow1.yml | namespace: prod |
| namespace: dev | _flows/flow2.yml | namespace: prod |
| namespace: dev.marketing | _flows/marketing/flow3.yml | namespace: prod.marketing |
| namespace: dev.marketing | _flows/marketing/flow4.yml | namespace: prod.marketing |
| namespace: dev.marketing.crm | _flows/marketing/crm/flow5.yml | namespace: prod.marketing.crm |
| namespace: dev.marketing.crm | _flows/marketing/crm/flow6.yml | namespace: prod.marketing.crm |

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passphrase string
password string
privateKey string

To generate an ECDSA PEM format key from OpenSSH, use the following command: ssh-keygen -t ecdsa -b 256 -m PEM. You can then set this property with your private key content and put your public key on Git.

sourceNamespace string

Default value is : "{{ flow.namespace }}"

Default: "{{ flow.namespace }}"
targetNamespace string

If set, the sourceNamespace will be overwritten to the targetNamespace in the flow source code to prepare your branch for merging into the production namespace.

timeout string
format=duration
url string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.git.PushNamespaceFiles object

Using this task, you can push one or more Namespace Files from a given kestra namespace to Git. Check the Version Control with Git documentation for more details.##### Examples

Push all saved Namespace Files from the dev namespace to a Git repository every 15 minutes.

id: push_to_git
namespace: company.team

tasks:
  - id: commit_and_push
    type: io.kestra.plugin.git.PushNamespaceFiles
    namespace: dev
    files: "*"  # optional list of glob patterns; by default, all files are pushed
    gitDirectory: _files # optional path in Git where Namespace Files should be pushed
    url: https://github.com/kestra-io/scripts # required string
    username: git_username # required string needed for Auth with Git
    password: "{{ secret('GITHUB_ACCESS_TOKEN') }}"
    branch: dev # optional, uses "kestra" by default
    commitMessage: "add namespace files" # optional string
    dryRun: true  # if true, you'll see what files will be added, modified or deleted based on the state in Git without overwriting the files yet

triggers:
  - id: schedule_push_to_git
    type: io.kestra.plugin.core.trigger.Schedule
    cron: "*/15 * * * *"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.git.PushNamespaceFiles" required
Constant: "io.kestra.plugin.git.PushNamespaceFiles"
allowFailure boolean

Default value is : false

Default: false
authorEmail string

If null, no author will be set on this commit.

authorName string

If null, the username will be used instead.

Default value is : 'username'

Default: "`username`"
branch string

If the branch doesn’t exist yet, it will be created. If not set, the task will push the files to the kestra branch.

Default value is : kestra

Default: "kestra"
cloneSubmodules boolean
commitMessage string

Default value is : Add files from namespace namespace

Default: "Add files from `namespace` namespace"
description string
disabled boolean

Default value is : false

Default: false
dryRun boolean

Default value is : false

Default: false
files

By default, Kestra will push all Namespace Files from the specified namespace. If you want to push only a specific file or directory e.g. myfile.py, you can set it explicitly using files: myfile.py. Given that this is a glob pattern string (or a list of glob patterns), you can include as many files as you wish, provided that the user is authorized to access that namespace. Note that each glob pattern tries to match the file name OR the relative path starting from gitDirectory

Default value is : '**'

Default: "**"
gitDirectory string

If not set, files will be pushed to a Git directory named _files. See the table below for an example mapping of Namespace Files to Git paths:

| Namespace File Path | Git directory path |
|---|---|
| scripts/app.py | _files/scripts/app.py |
| scripts/etl.py | _files/scripts/etl.py |
| queries/orders.sql | _files/queries/orders.sql |
| queries/customers.sql | _files/queries/customers.sql |
| requirements.txt | _files/requirements.txt |

Default value is : _files

Default: "_files"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespace string

Default value is : "{{ flow.namespace }}"

Default: "{{ flow.namespace }}"
passphrase string
password string
privateKey string

To generate an ECDSA PEM format key from OpenSSH, use the following command: ssh-keygen -t ecdsa -b 256 -m PEM. You can then set this property with your private key content and put your public key on Git.

timeout string
format=duration
url string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.git.Sync object

Replaced by SyncFlows and SyncNamespaceFiles. Files located in gitDirectory will be synced with namespace files under namespaceFilesDirectory folder. Any file not present in the gitDirectory but present in namespaceFilesDirectory will be deleted from namespace files to ensure that Git remains a single source of truth for your workflow and application code. If you don't want some files from Git to be synced, you can add them to a .kestraignore file at the root of your gitDirectory folder — that file works the same way as .gitignore.

If there is a _flows folder under the gitDirectory folder, any file within it will be parsed and imported as a flow under the namespace declared in the task. It's important to keep in mind that all flows must be located within the same directory without any nested directories. If you want to deploy all flows to kestra from Git using the Git Sync pattern, you have to place all your flows in the _flows directory. Adding namespace folders will result in an error and that's expected. Flows are not equivalent to Namespace Files — while Namespace Files can be stored in arbitrarily nested folders stored in Internal Storage, Flows are just metadata. Flows are sent to Kestra's API and stored in the database backend. This is why they follow a different deployment pattern and cannot be stored in nested folders in Git.

Another important aspect is that the namespace defined in the flow code might get overwritten (!) if the namespace defined within Git doesn't match the namespace or a child namespace defined in the Git Sync task. All Git deployments, both the Git Sync and Kestra's CI/CD integrations, operate on a namespace level to ensure namespace-level governance of permissions, secrets, and to allow separation of resources. If you leverage multiple namespaces in a monorepo, you can create multiple flows, each using the Git Sync task to sync specific Git directories to the desired namespaces.##### Examples

Synchronizes namespace files and flows based on the current state in a Git repository. This flow can run either on a schedule (using the Schedule trigger) or anytime you push a change to a given Git branch (using the Webhook trigger).

id: sync_from_git
namespace: company.team

tasks:
  - id: git
    type: io.kestra.plugin.git.Sync
    url: https://github.com/kestra-io/scripts
    branch: main
    username: git_username
    password: "{{ secret('GITHUB_ACCESS_TOKEN') }}"
    gitDirectory: your_git_dir # optional, otherwise all files
    namespaceFilesDirectory: your_namespace_files_location # optional, otherwise the namespace root directory
    dryRun: true  # if true, print the output of what files will be added/modified or deleted without overwriting the files yet

triggers:
  - id: every_minute
    type: io.kestra.plugin.core.trigger.Schedule
    cron: "*/1 * * * *"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.git.Sync" required
Constant: "io.kestra.plugin.git.Sync"
url string required
allowFailure boolean

Default value is : false

Default: false
branch string
cloneSubmodules boolean
description string
disabled boolean

Default value is : false

Default: false
dryRun boolean
gitDirectory string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFilesDirectory string
passphrase string
password string
privateKey string

To generate an ECDSA PEM format key from OpenSSH, use the following command: ssh-keygen -t ecdsa -b 256 -m PEM. You can then set this property with your private key content and put your public key on Git.

timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.git.SyncFlows object

This task syncs flows from a given Git branch to a Kestra namespace. If the delete property is set to true, any flow available in kestra but not present in the gitDirectory will be deleted, considering Git as a single source of truth for your flows. Check the Version Control with Git documentation for more details.##### Examples

Sync flows from a Git repository. This flow can run either on a schedule (using the Schedule trigger) or anytime you push a change to a given Git branch (using the Webhook trigger).

id: sync_flows_from_git
namespace: company.team

tasks:
  - id: git
    type: io.kestra.plugin.git.SyncFlows
    gitDirectory: flows # optional; set to _flows by default
    targetNamespace: git # required
    includeChildNamespaces: true # optional; by default, it's set to false to allow explicit definition
    delete: true # optional; by default, it's set to false to avoid destructive behavior
    url: https://github.com/kestra-io/flows # required
    branch: main
    username: git_username
    password: "{{ secret('GITHUB_ACCESS_TOKEN') }}"
    dryRun: true  # if true, the task will only log which flows from Git will be added/modified or deleted in kestra without making any changes in kestra backend yet

triggers:
  - id: every_full_hour
    type: io.kestra.plugin.core.trigger.Schedule
    cron: "0 * * * *"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
targetNamespace string required

If the top-level namespace specified in the flow source code is different than the targetNamespace, it will be overwritten by this target namespace. This facilitates moving between environments and projects. If includeChildNamespaces property is set to true, the top-level namespace in the source code will also be overwritten by the targetNamespace in children namespaces.

For example, if the targetNamespace is set to prod and includeChildNamespaces property is set to true, then:

  • namespace: dev in flow source code will be overwritten by namespace: prod,
  • namespace: dev.marketing.crm will be overwritten by namespace: prod.marketing.crm.

See the table below for a practical explanation:

| Source namespace in the flow code | Git directory path | Synced to target namespace |
|---|---|---|
| namespace: dev | _flows/flow1.yml | namespace: prod |
| namespace: dev | _flows/flow2.yml | namespace: prod |
| namespace: dev.marketing | _flows/marketing/flow3.yml | namespace: prod.marketing |
| namespace: dev.marketing | _flows/marketing/flow4.yml | namespace: prod.marketing |
| namespace: dev.marketing.crm | _flows/marketing/crm/flow5.yml | namespace: prod.marketing.crm |
| namespace: dev.marketing.crm | _flows/marketing/crm/flow6.yml | namespace: prod.marketing.crm |
type const: "io.kestra.plugin.git.SyncFlows" required
Constant: "io.kestra.plugin.git.SyncFlows"
allowFailure boolean

Default value is : false

Default: false
branch string

Default value is : main

Default: "main"
cloneSubmodules boolean
delete boolean

It’s false by default to avoid destructive behavior. Use this property with caution because when set to true and includeChildNamespaces is also set to true, this task will delete all flows from the targetNamespace and all its child namespaces that are not present in Git rather than only overwriting the changes.

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
dryRun boolean

Default value is : false

Default: false
flowService object
gitDirectory string

If not set, this task assumes your branch has a Git directory named _flows (equivalent to the default gitDirectory of the PushFlows task).

If includeChildNamespaces property is set to true, this task will push all flows from nested subdirectories into their corresponding child namespaces, e.g. if targetNamespace is set to prod, then:

  • flows from the _flows directory will be synced to the prod namespace,
  • flows from the _flows/marketing subdirectory in Git will be synced to the prod.marketing namespace,
  • flows from the _flows/marketing/crm subdirectory will be synced to the prod.marketing.crm namespace.

Default value is : _flows

Default: "_flows"
includeChildNamespaces boolean

It’s false by default so that we sync only flows from the explicitly declared gitDirectory without traversing child directories. If set to true, flows from subdirectories in Git will be synced to child namespaces in Kestra using the dot notation . for each subdirectory in the folder structure.

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
passphrase string
password string
privateKey string

To generate an ECDSA PEM format key from OpenSSH, use the following command: ssh-keygen -t ecdsa -b 256 -m PEM. You can then set this property with your private key content and put your public key on Git.

timeout string
format=duration
url string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.git.SyncNamespaceFiles object

This task syncs Namespace Files from a given Git branch to a Kestra namespace. If the delete property is set to true, any Namespace Files available in kestra but not present in the gitDirectory will be deleted, allowing you to maintain Git as a single source of truth for your Namespace Files. Check the Version Control with Git documentation for more details.##### Examples

Sync Namespace Files from a Git repository. This flow can run either on a schedule (using the Schedule trigger) or anytime you push a change to a given Git branch (using the Webhook trigger).

id: sync_from_git
namespace: company.team

tasks:
  - id: git
    type: io.kestra.plugin.git.SyncNamespaceFiles
    namespace: prod
    gitDirectory: _files # optional; set to _files by default
    delete: true # optional; by default, it's set to false to avoid destructive behavior
    url: https://github.com/kestra-io/flows
    branch: main
    username: git_username
    password: "{{ secret('GITHUB_ACCESS_TOKEN') }}"
    dryRun: true  # if true, the task will only log which Namespace Files from Git will be added/modified or deleted in kestra without making any changes in kestra backend yet

triggers:
  - id: every_minute
    type: io.kestra.plugin.core.trigger.Schedule
    cron: "*/1 * * * *"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.git.SyncNamespaceFiles" required
Constant: "io.kestra.plugin.git.SyncNamespaceFiles"
allowFailure boolean

Default value is : false

Default: false
branch string

Default value is : kestra

Default: "kestra"
cloneSubmodules boolean
delete boolean

It’s false by default to avoid destructive behavior. Use with caution because when set to true, this task will delete all Namespace Files which are not present in Git.

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
dryRun boolean

Default value is : false

Default: false
gitDirectory string

If not set, this task assumes your branch includes a directory named _files

Default value is : _files

Default: "_files"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespace string

Default value is : "{{ flow.namespace }}"

Default: "{{ flow.namespace }}"
passphrase string
password string
privateKey string

To generate an ECDSA PEM format key from OpenSSH, use the following command: ssh-keygen -t ecdsa -b 256 -m PEM. You can then set this property with your private key content and put your public key on Git.

timeout string
format=duration
url string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.github.code.Search object

Requires authentication.##### Examples

Search for code in a repository.

id: github_code_search_flow
namespace: company.team

tasks:
  - id: search_code
    type: io.kestra.plugin.github.code.Search
    oauthToken: your_github_token
    query: "addClass in:file language:js repo:jquery/jquery"

Search for code in a repository.

id: github_code_search_flow
namespace: company.team

tasks:
  - id: search_code
    type: io.kestra.plugin.github.code.Search
    oauthToken: your_github_token
    query: addClass
    in: file
    language: js
    repository: jquery/jquery

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.github.code.Search" required
Constant: "io.kestra.plugin.github.code.Search"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
extension string
filename string
fork string

Whether to include forks.

Values: "PARENT_AND_FORKS" "FORKS_ONLY" "PARENT_ONLY"
in string
jwtToken string

Does not require additional fields to log in

language string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
login string

Requires additional field: oauthToken, to log-in

oauthToken string

GitHub Personal Access Token. In addition, can be used with login or by its own

order string

ASC - the results will be in ascending order

DESC - the results will be in descending order

Default value is : ASC

Default: "ASC"
Values: "ASC" "DESC"
path string
query string

Allows you to limit your search to specific areas of GitHub.

repository string
size string
sort string

BEST_MATCH - the results will be sorted by best match results

INDEXED - the results will be sorted by the index

Default value is : BEST_MATCH

Default: "BEST_MATCH"
Values: "BEST_MATCH" "INDEXED"
timeout string
format=duration
user string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.github.commits.Search object

Requires authentication.##### Examples

Search for commits in a repository.

id: github_commit_search_flow
namespace: company.team

tasks:
  - id: search_commit
    type: io.kestra.plugin.github.commits.Search
    oauthToken: your_github_token
    query: "Initial repo:kestra-io/plugin-github language:java"

Search for commits in a repository.

id: github_commit_search_flow
namespace: company.team

tasks:
  - id: search_commit
    type: io.kestra.plugin.github.commits.Search
    oauthToken: your_github_token
    query: Initial
    repository: kestra-io/plugin-github

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.github.commits.Search" required
Constant: "io.kestra.plugin.github.commits.Search"
allowFailure boolean

Default value is : false

Default: false
author string
authorDate string
authorEmail string
authorName string
committer string
committerDate string

When you search for a date, you can use greater than, less than, and range qualifiers to further filter results.

committerEmail string
committerName string
description string
disabled boolean

Default value is : false

Default: false
hash string
is string
jwtToken string

Does not require additional fields to log in

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
login string

Requires additional field: oauthToken, to log-in

merge boolean
oauthToken string

GitHub Personal Access Token. In addition, can be used with login or by its own

order string

ASC - the results will be in ascending order

DESC - the results will be in descending order

Default value is : ASC

Default: "ASC"
Values: "ASC" "DESC"
org string
parent string
query string

Allows you to limit your search to specific areas of GitHub.

repository string
sort string

COMMITTER_DATE - the results will be sorted by the commit's committer date

AUTHOR_DATE - the results will be sorted by the commit's author date

Default value is : COMMITTER_DATE

Default: "COMMITTER_DATE"
Values: "COMMITTER_DATE" "AUTHOR_DATE"
timeout string
format=duration
tree string
user string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.github.issues.Comment object

If no authentication is provided, anonymous authentication will be used.##### Examples

Put a comment on an issue in a repository.

id: github_comment_on_issue_flow
namespace: company.team

tasks:
  - id: comment_on_issue
    type: io.kestra.plugin.github.issues.Comment
    oauthToken: your_github_token
    repository: kestra-io/kestra
    issueNumber: 1347
    body: "{{ execution.id }} has failed on {{ taskrun.startDate }}. See the link below for more details"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.github.issues.Comment" required
Constant: "io.kestra.plugin.github.issues.Comment"
allowFailure boolean

Default value is : false

Default: false
body string
description string
disabled boolean

Default value is : false

Default: false
issueNumber integer
jwtToken string

Does not require additional fields to log in

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
login string

Requires additional field: oauthToken, to log-in

oauthToken string

GitHub Personal Access Token. In addition, can be used with login or by its own

repository string

Repository where issue/ticket should be created. It's a string of Username + / + Repository name

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.github.issues.Create object

If no authentication is provided, anonymous authentication will be used.##### Examples

Create an issue in a repository using JWT token.

id: github_issue_create_flow
namespace: company.team

tasks:
  - id: create_issue
    type: io.kestra.plugin.github.issues.Create
    jwtToken: your_github_jwt_token
    repository: kestra-io/kestra
    title: Workflow failed
    body: "{{ execution.id }} has failed on {{ taskrun.startDate }}. See the link below for more details"
    labels:
      - bug
      - workflow

Create an issue in a repository using OAuth token.

id: github_issue_create_flow
namespace: company.team

tasks:
  - id: create_issue
    type: io.kestra.plugin.github.issues.Create
    login: your_github_login
    oauthToken: your_github_token
    repository: kestra-io/kestra
    title: Workflow failed
    body: "{{ execution.id }} has failed on {{ taskrun.startDate }}. See the link below for more details"
    labels:
      - bug
      - workflow

Create an issue in a repository with assignees.

id: github_issue_create_flow
namespace: company.team

tasks:
  - id: create_issue
    type: io.kestra.plugin.github.issues.Create
    oauthToken: your_github_token
    repository: kestra-io/kestra
    title: Workflow failed
    body: "{{ execution.id }} has failed on {{ taskrun.startDate }}. See the link below for more details"
    labels:
      - bug
      - workflow
    assignees:
      - MyDeveloperUserName
      - MyDesignerUserName

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.github.issues.Create" required
Constant: "io.kestra.plugin.github.issues.Create"
allowFailure boolean

Default value is : false

Default: false
assignees string[]

List of unique names of assignees.

body string
description string
disabled boolean

Default value is : false

Default: false
jwtToken string

Does not require additional fields to log in

labels string[]

List of labels for ticket.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
login string

Requires additional field: oauthToken, to log-in

oauthToken string

GitHub Personal Access Token. In addition, can be used with login or by its own

repository string

Repository where issue/ticket should be created. It's a string of Username + / + Repository name

timeout string
format=duration
title string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.github.issues.Search object

If no authentication is provided, anonymous authentication will be used##### Examples

Search for issues in a repository.

id: github_issue_search_flow
namespace: company.team

tasks:
  - id: search_issues
    type: io.kestra.plugin.github.issues.Search
    oauthToken: your_github_token
    query: "repo:kestra-io/plugin-github is:open"

Search for open issues in a repository.

id: github_issue_search_flow
namespace: company.team

tasks:
  - id: search_open_issues
    type: io.kestra.plugin.github.issues.Search
    oauthToken: your_github_token
    repository: kestra-io/plugin-github
    open: TRUE

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.github.issues.Search" required
Constant: "io.kestra.plugin.github.issues.Search"
allowFailure boolean

Default value is : false

Default: false
closed boolean
description string
disabled boolean

Default value is : false

Default: false
jwtToken string

Does not require additional fields to log in

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
login string

Requires additional field: oauthToken, to log-in

mentions string
merged boolean
oauthToken string

GitHub Personal Access Token. In addition, can be used with login or by its own

open boolean
order string

ASC - the results will be in ascending order

DESC - the results will be in descending order

Default value is : ASC

Default: "ASC"
Values: "ASC" "DESC"
query string
sort string

CREATED - Sorts the results of query by the time issue was created (DEFAULT)

UPDATED - Sorts the results of query by the time the issue was last updated

COMMENTS - Sorts the results of query by the number of comments

Default value is : CREATED

Default: "CREATED"
Values: "CREATED" "UPDATED" "COMMENTS"
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.github.pulls.Create object

If no authentication is provided, anonymous authentication will be used.##### Examples

Create a pull request in a repository.

id: github_pulls_create_flow
namespace: company.team

tasks:
  - id: create_pull_request
    type: io.kestra.plugin.github.pulls.Create
    oauthToken: your_github_token
    repository: kestra-io/kestra
    sourceBranch: develop
    targetBranch: main
    title: Workflow failed
    body: "{{ execution.id }} has failed on {{ taskrun.startDate }}. See the link below for more details"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.github.pulls.Create" required
Constant: "io.kestra.plugin.github.pulls.Create"
allowFailure boolean

Default value is : false

Default: false
body string

The contents of the pull request. This is the markdown description of a pull request.

description string
disabled boolean

Default value is : false

Default: false
draft boolean

Boolean value indicates whether to create a draft pull request or not. Default is false.

Default value is : false

Default: false
jwtToken string

Does not require additional fields to log in

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
login string

Requires additional field: oauthToken, to log-in

maintainerCanModify boolean

Boolean value indicating whether maintainers can modify the pull request. Default is false.

Default value is : false

Default: false
oauthToken string

GitHub Personal Access Token. In addition, can be used with login or by its own

repository string

Repository where issue/ticket should be created. It's a string of Username + / + Repository name

sourceBranch string

Required. The name of the branch where your changes are implemented. For cross-repository pull requests in the same network, namespace head with a user like this: username:branch.

targetBranch string

Required. The name of the branch you want your changes pulled into. This should be an existing branch on the current repository.

timeout string
format=duration
title string

Required. The title of the pull request.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.github.pulls.Search object

If no authentication is provided, anonymous authentication will be used. Anonymous authentication can't retrieve full information.##### Examples

Search for pull requests in a repository.

id: github_pulls_search_flow
namespace: company.team

tasks:
  - id: search_pull_requests
    type: io.kestra.plugin.github.pulls.Search
    oauthToken: your_github_token
    query: "repo:kestra-io/plugin-github is:open"

Search for open pull requests in a repository.

id: github_pulls_search_flow
namespace: company.team

tasks:
  - id: search_open_pull_requests
    type: io.kestra.plugin.github.pulls.Search
    oauthToken: your_github_token
    repository: kestra-io/plugin-github
    open: TRUE

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.github.pulls.Search" required
Constant: "io.kestra.plugin.github.pulls.Search"
allowFailure boolean

Default value is : false

Default: false
assigned string
author string
base string
closed boolean
closedAt string

You can use greater than, less than, and range qualifiers (.. between two dates) to further filter results.

commit string

The SHA syntax must be at least seven characters.

createdAt string

You can use greater than, less than, and range qualifiers (.. between two dates) to further filter results.

createdByMe boolean

Requires authentication.

description string
disabled boolean

Default value is : false

Default: false
draft boolean
head string
jwtToken string

Does not require additional fields to log in.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
login string

Requires additional field: oauthToken, to log-in

mentions string
merged boolean
oauthToken string

GitHub Personal Access Token. Can be used together with login or on its own.

open boolean
order string

ASC - the results will be in ascending order

DESC - the results will be in descending order

Default value is : ASC

Default: "ASC"
Values: "ASC" "DESC"
query string

Allow you to limit your search to specific areas of GitHub.

repository string
sort string

CREATED - Sorts the results of the query by the time the issue was created (DEFAULT)

UPDATED - Sorts the results of the query by the time the issue was last updated

COMMENTS - Sorts the results of query by the number of comments

Default value is : CREATED

Default: "CREATED"
Values: "CREATED" "UPDATED" "COMMENTS"
timeout string
format=duration
title string
updatedAt string

You can use greater than, less than, and range qualifiers (.. between two dates) to further filter results

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.github.repositories.Search object

If no authentication is provided, anonymous authentication will be used. Anonymous authentication can't retrieve full information.##### Examples

Search for Github repositories using query.

id: github_repo_search_flow
namespace: company.team

tasks:
  - id: search_repositories
    type: io.kestra.plugin.github.repositories.Search
    oauthToken: your_github_token
    query: "repo:kestra-io/plugin-github"

Search for Github repositories using repository.

id: github_repo_search_flow
namespace: company.team

tasks:
  - id: search_repositories
    type: io.kestra.plugin.github.repositories.Search
    oauthToken: your_github_token
    repository: kestra-io/plugin-github

Search for Github repositories and order the results.

id: github_repo_search_flow
namespace: company.team

tasks:
  - id: search_repositories
    type: io.kestra.plugin.github.repositories.Search
    oauthToken: your_github_token
    query: "user:kestra-io language:java is:public"
    sort: STARS
    order: DESC

Search for Github repositories with filters like language and visibility, and order the results.

id: github_repo_search_flow
namespace: company.team

tasks:
  - id: search_repositories
    type: io.kestra.plugin.github.repositories.Search
    oauthToken: your_github_token
    user: kestra-io
    language: java
    visibility: PUBLIC
    sort: STARS
    order: DESC

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.github.repositories.Search" required
Constant: "io.kestra.plugin.github.repositories.Search"
allowFailure boolean

Default value is : false

Default: false
created string
description string
disabled boolean

Default value is : false

Default: false
jwtToken string

Does not require additional fields to log in.

language string

Can be the language name or alias.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
login string

Requires additional field: oauthToken, to log-in

oauthToken string

GitHub Personal Access Token. Can be used together with login or on its own.

order string

ASC - the results will be in ascending order

DESC - the results will be in descending order

Default value is : ASC

Default: "ASC"
Values: "ASC" "DESC"
query string

Qualifiers allow you to limit your search to specific areas of GitHub.

repository string

Example string: "myUserName/MyRepository". query equivalent: "repo:myUserName/MyRepository".

sort string

UPDATED - the results will be sorted by when the repository was last updated

STARS - the results will be sorted by the number of stars the repository has

FORKS - the results will be sorted by the number of forks the repository has

Default value is : UPDATED

Default: "UPDATED"
Values: "UPDATED" "STARS" "FORKS"
stars string
timeout string
format=duration
topic string
user string

To search by organization, use: "query: org:myOrganization".

visibility string

PUBLIC - shows only public repositories

PRIVATE - shows only private repositories that are available for user who is searching

INTERNAL - shows only internal repositories

Values: "PUBLIC" "PRIVATE" "INTERNAL"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.github.topics.Search object

If no authentication is provided, anonymous authentication will be used. Anonymous authentication can't retrieve full information.##### Examples

Search for topics.

id: github_topic_search_flow
namespace: company.team

tasks:
  - id: search_topics
    type: io.kestra.plugin.github.topics.Search
    oauthToken: your_github_token
    query: "micronaut framework is:not-curated repositories:>100"

Search for topics with conditions.

id: github_topic_search_flow
namespace: company.team

tasks:
  - id: search_topics
    type: io.kestra.plugin.github.topics.Search
    oauthToken: your_github_token
    query: "micronaut framework"
    is: NOT_CURATED
    repositories: ">100"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.github.topics.Search" required
Constant: "io.kestra.plugin.github.topics.Search"
allowFailure boolean

Default value is : false

Default: false
created string

You can use greater than, less than, and range qualifiers to further filter results.

description string
disabled boolean

Default value is : false

Default: false
is string

CURATED - Matches topics that are curated

FEATURED - Matches topics that are featured on https://github.com/topics/

NOT_CURATED - Matches topics that don't have extra information, such as a description or logo

NOT_FEATURED - Matches topics that aren't featured on https://github.com/topics/

Values: "CURATED" "FEATURED" "NOT_CURATED" "NOT_FEATURED"
jwtToken string

Does not require additional fields to log in.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
login string

Requires additional field: oauthToken, to log-in

oauthToken string

GitHub Personal Access Token. Can be used together with login or on its own.

order string

ASC - the results will be in ascending order

DESC - the results will be in descending order

Default value is : ASC

Default: "ASC"
Values: "ASC" "DESC"
query string

Allow you to limit your search to specific areas of GitHub.

repositories string

You can use greater than, less than, and range qualifiers to further filter results.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.github.users.Search object

If no authentication is provided, anonymous authentication will be used. Anonymous authentication can't retrieve full information.##### Examples

Search for users.

id: github_user_search_flow
namespace: company.team

tasks:
  - id: search_users
    type: io.kestra.plugin.github.users.Search
    oauthToken: your_github_token
    query: "kestra-io in:login language:java"

Search for users with conditions.

id: github_user_search_flow
namespace: company.team

tasks:
  - id: search_users
    type: io.kestra.plugin.github.users.Search
    oauthToken: your_github_token
    query: kestra-io
    in: login
    language: java

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.github.users.Search" required
Constant: "io.kestra.plugin.github.users.Search"
accountType string

USER - the results will include only user accounts

ORGANIZATION - the results will include only organization accounts

Values: "USER" "ORGANIZATION"
allowFailure boolean

Default value is : false

Default: false
created string

Available formats:

  • '<=YYYY-MM-DD' - joined at or before

  • '>=YYYY-MM-DD' - joined at or after

  • Similar cases for above two with ">", "<"

  • 'YYYY-MM-DD..YYYY-MM-DD' - joined in period between

description string
disabled boolean

Default value is : false

Default: false
followers string
in string

Example kenya in:login matches users with the word "kenya" in their username. One more case of use to search users that have sponsor profile, equivalent to query: is:sponsorable.

jwtToken string

Does not require additional fields to log in.

language string

Can be the language name or alias.

location string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
login string

Requires additional field: oauthToken, to log-in

oauthToken string

GitHub Personal Access Token. Can be used together with login or on its own.

order string

ASC - the results will be in ascending order (DEFAULT)

DESC - the results will be in descending order

Default value is : ASC

Default: "ASC"
Values: "ASC" "DESC"
query string

Qualifiers allow you to limit your search to specific areas of GitHub.

repositories integer
sort string

JOINED - the results will be sorted by when user joined to Github (DEFAULT)

REPOSITORIES - the results will be sorted by the number of repositories owned by user

FOLLOWERS - the results will be sorted by the number of followers that user has

Default value is : JOINED

Default: "JOINED"
Values: "JOINED" "REPOSITORIES" "FOLLOWERS"
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.googleworkspace.drive.Create object
Examples
id: googleworkspace_drive_create
namespace: company.team

tasks:
  - id: create
    type: io.kestra.plugin.googleworkspace.drive.Create
    name: "My Folder"
    mimeType: "application/vnd.google-apps.folder"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.googleworkspace.drive.Create" required
Constant: "io.kestra.plugin.googleworkspace.drive.Create"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
mimeType string

Drive will attempt to automatically detect an appropriate value from uploaded content if no value is provided. The value cannot be changed unless a new revision is uploaded. If a file is created with a Google Doc MIME type, the uploaded content will be imported if possible. The supported import formats are published here.

name string

This is not necessarily unique within a folder

parents string[]
readTimeout integer

Default value is : 120

Default: 120
scopes string[]

Default value is : - https://www.googleapis.com/auth/drive

Default value is : - https://www.googleapis.com/auth/drive

Default:
[
  "https://www.googleapis.com/auth/drive"
]
serviceAccount string
teamDriveId string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.googleworkspace.drive.Delete object
Examples
id: googleworkspace_drive_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.googleworkspace.drive.Delete
    fileId: "1Dkd3W0OQo-wxz1rrORLP7YGSj6EBLEg74fiTdbJUIQE"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.googleworkspace.drive.Delete" required
Constant: "io.kestra.plugin.googleworkspace.drive.Delete"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
fileId string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
readTimeout integer

Default value is : 120

Default: 120
scopes string[]

Default value is : - https://www.googleapis.com/auth/drive

Default value is : - https://www.googleapis.com/auth/drive

Default:
[
  "https://www.googleapis.com/auth/drive"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.googleworkspace.drive.Download object
Examples
id: googleworkspace_drive_download
namespace: company.team

tasks:
  - id: download
    type: io.kestra.plugin.googleworkspace.drive.Download
    fileId: "1Dkd3W0OQo-wxz1rrORLP7YGSj6EBLEg74fiTdbJUIQE"

fileId string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.googleworkspace.drive.Download" required
Constant: "io.kestra.plugin.googleworkspace.drive.Download"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
readTimeout integer

Default value is : 120

Default: 120
scopes string[]

Default value is : - https://www.googleapis.com/auth/drive

Default value is : - https://www.googleapis.com/auth/drive

Default:
[
  "https://www.googleapis.com/auth/drive"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.googleworkspace.drive.Export object
Examples
id: googleworkspace_drive_export
namespace: company.team

tasks:
  - id: export
    type: io.kestra.plugin.googleworkspace.drive.Export
    fileId: "1Dkd3W0OQo-wxz1rrORLP7YGSj6EBLEg74fiTdbJUIQE"

contentType string required

a valid RFC2045 like text/csv, application/msword, ...

fileId string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.googleworkspace.drive.Export" required
Constant: "io.kestra.plugin.googleworkspace.drive.Export"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
readTimeout integer

Default value is : 120

Default: 120
scopes string[]

Default value is : - https://www.googleapis.com/auth/drive

Default value is : - https://www.googleapis.com/auth/drive

Default:
[
  "https://www.googleapis.com/auth/drive"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.googleworkspace.drive.List object
Examples

List subfolder in a Drive folder

id: googleworkspace_drive_list
namespace: company.team

tasks:
  - id: list
    type: io.kestra.plugin.googleworkspace.drive.List
    query: |
      mimeType = 'application/vnd.google-apps.folder'
      and '1z2GZgLEX12BN9zbVE6TodrCHyTRMj_ka' in parents

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.googleworkspace.drive.List" required
Constant: "io.kestra.plugin.googleworkspace.drive.List"
allowFailure boolean

Default value is : false

Default: false
corpora string[]

'allTeamDrives' must be combined with 'user'; all other values must be used in isolation. Prefer 'user' or 'teamDrive' to 'allTeamDrives' for efficiency.

description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
query string

See details here. If not defined, the task will list all files that the service account has access to.

readTimeout integer

Default value is : 120

Default: 120
scopes string[]

Default value is : - https://www.googleapis.com/auth/drive

Default value is : - https://www.googleapis.com/auth/drive

Default:
[
  "https://www.googleapis.com/auth/drive"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.googleworkspace.drive.Upload object
Examples

Upload a csv and convert it to sheet format

id: googleworkspace_drive_upload
namespace: company.team

inputs:
  - id: file
    type: FILE
    description: The file to be uploaded to Google Drive

tasks:
  - id: upload
    type: io.kestra.plugin.googleworkspace.drive.Upload
    from: "{{ inputs.file }}"
    parents:
     - "1HuxzpLt1b0111MuKMgy8wAv-m9Myc1E_"
    name: "My awesome CSV"
    contentType: "text/csv"
    mimeType: "application/vnd.google-apps.spreadsheet"

contentType string required

a valid RFC2045 like text/csv, application/msword, ...

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.googleworkspace.drive.Upload" required
Constant: "io.kestra.plugin.googleworkspace.drive.Upload"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
fileId string

If not provided, it will create a new file

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
mimeType string

Drive will attempt to automatically detect an appropriate value from uploaded content if no value is provided. The value cannot be changed unless a new revision is uploaded. If a file is created with a Google Doc MIME type, the uploaded content will be imported if possible. The supported import formats are published here.

name string

This is not necessarily unique within a folder

parents string[]
readTimeout integer

Default value is : 120

Default: 120
scopes string[]

Default value is : - https://www.googleapis.com/auth/drive

Default value is : - https://www.googleapis.com/auth/drive

Default:
[
  "https://www.googleapis.com/auth/drive"
]
serviceAccount string
teamDriveId string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.googleworkspace.sheets.AbstractLoad-CsvOptions object
encoding string

Default value is : UTF-8

Default: "UTF-8"
fieldDelimiter string

Default value is : ","

Default: ","
quote string
skipLeadingRows integer

The default value is 0. This property is useful if you have header rows in the file that should be skipped.

io.kestra.plugin.googleworkspace.sheets.CreateSpreadsheet object
Examples

Create a spreadsheet in Google Workspace

         id: googleworkspace_sheets_create
         namespace: company.team

         inputs:
           - id: serviceAccount
             type: STRING

         tasks:
           - id: create_spreadsheet
             type: io.kestra.plugin.googleworkspace.sheets.CreateSpreadsheet
             serviceAccount: "{{ inputs.serviceAccount }}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
title string required
type const: "io.kestra.plugin.googleworkspace.sheets.CreateSpreadsheet" required
Constant: "io.kestra.plugin.googleworkspace.sheets.CreateSpreadsheet"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
readTimeout integer

Default value is : 120

Default: 120
scopes string[]

Default value is : - https://www.googleapis.com/auth/spreadsheets

Default value is : - https://www.googleapis.com/auth/spreadsheets

Default:
[
  "https://www.googleapis.com/auth/spreadsheets"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.googleworkspace.sheets.DeleteSpreadsheet object
Examples

Delete a spreadsheet in Google Workspace

         id: googleworkspace_sheets_delete
         namespace: company.team

         inputs:
           - id: serviceAccount
             type: STRING

         tasks:
           - id: delete_spreadsheet
             type: io.kestra.plugin.googleworkspace.sheets.DeleteSpreadsheet
             serviceAccount: "{{ inputs.serviceAccount }}"
             spreadsheetId: "xxxxxxxxxxxxxxxx"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
spreadsheetId string required
type const: "io.kestra.plugin.googleworkspace.sheets.DeleteSpreadsheet" required
Constant: "io.kestra.plugin.googleworkspace.sheets.DeleteSpreadsheet"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
readTimeout integer

Default value is : 120

Default: 120
scopes string[]

Default value is : - https://www.googleapis.com/auth/spreadsheets

Default value is : - https://www.googleapis.com/auth/spreadsheets

Default:
[
  "https://www.googleapis.com/auth/spreadsheets"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.googleworkspace.sheets.Load object
Examples

Load data into a Google Workspace spreadsheet from an input file

id: googleworkspace_sheets_load
namespace: company.team

inputs:
  - id: file
    type: FILE
  - id: serviceAccount
    type: STRING

tasks:
  - id: load_data
    type: io.kestra.plugin.googleworkspace.sheets.Load
    from: "{{ inputs.file }}"
    spreadsheetId: xxxxxxxxxxxxxxxxx
    range: Sheet2
    serviceAccount: "{{ inputs.serviceAccount }}"
    csvOptions:
      fieldDelimiter: ";"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
spreadsheetId string required
type const: "io.kestra.plugin.googleworkspace.sheets.Load" required
Constant: "io.kestra.plugin.googleworkspace.sheets.Load"
allowFailure boolean

Default value is : false

Default: false
avroSchema string

If provided, the task will read avro objects using this schema.

csvOptions
All of: io.kestra.plugin.googleworkspace.sheets.AbstractLoad-CsvOptions object, Csv parsing options (Optional).
description string
disabled boolean

Default value is : false

Default: false
format string

If not provided, the task will programmatically try to find the correct format based on the extension.

Values: "ION" "CSV" "AVRO" "PARQUET" "ORC" "JSON"
from string
format=uri
header boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
range string

Default value is : Sheet1

Default: "Sheet1"
readTimeout integer

Default value is : 120

Default: 120
scopes string[]

Default value is : - https://www.googleapis.com/auth/spreadsheets

Default value is : - https://www.googleapis.com/auth/spreadsheets

Default:
[
  "https://www.googleapis.com/auth/spreadsheets"
]
serviceAccount string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.googleworkspace.sheets.Read object
Examples
id: googleworkspace_sheets_read
namespace: company.team

tasks:
  - id: read
    type: io.kestra.plugin.googleworkspace.sheets.Read
    spreadsheetId: "1Dkd3W0OQo-wxz1rrORLP7YGSj6EBLEg74fiTdbJUIQE"
    store: true
    valueRender: FORMATTED_VALUE

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
spreadsheetId string required
type const: "io.kestra.plugin.googleworkspace.sheets.Read" required
Constant: "io.kestra.plugin.googleworkspace.sheets.Read"
allowFailure boolean

Default value is : false

Default: false
dateTimeRender string

This is ignored if valueRender is FORMATTED_VALUE. More details here

Default value is : FORMATTED_STRING

Default: "FORMATTED_STRING"
Values: "SERIAL_NUMBER" "FORMATTED_STRING"
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
header boolean

Default value is : true

Default: true
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
readTimeout integer

Default value is : 120

Default: 120
scopes string[]

Default value is : - https://www.googleapis.com/auth/spreadsheets

Default value is : - https://www.googleapis.com/auth/spreadsheets

Default:
[
  "https://www.googleapis.com/auth/spreadsheets"
]
selectedSheetsTitle string[]

If not provided all the sheets will be included.

serviceAccount string
store boolean

Default value is : true

Default: true
timeout string
format=duration
valueRender string

More details here

Default value is : UNFORMATTED_VALUE

Default: "UNFORMATTED_VALUE"
Values: "FORMATTED_VALUE" "UNFORMATTED_VALUE" "FORMULA"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.googleworkspace.sheets.ReadRange object
Examples
id: googleworkspace_sheets_readrange
namespace: company.team

tasks:
  - id: read_range
    type: io.kestra.plugin.googleworkspace.sheets.ReadRange
    spreadsheetId: "1Dkd3W0OQo-wxz1rrORLP7YGSj6EBLEg74fiTdbJUIQE"
    range: "Second One!A1:I"
    store: true
    valueRender: FORMATTED_VALUE

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
spreadsheetId string required
type const: "io.kestra.plugin.googleworkspace.sheets.ReadRange" required
Constant: "io.kestra.plugin.googleworkspace.sheets.ReadRange"
allowFailure boolean

Default value is : false

Default: false
dateTimeRender string

This is ignored if valueRender is FORMATTED_VALUE. More details here

Default value is : FORMATTED_STRING

Default: "FORMATTED_STRING"
Values: "SERIAL_NUMBER" "FORMATTED_STRING"
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
header boolean

Default value is : true

Default: true
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
range string
readTimeout integer

Default value is : 120

Default: 120
scopes string[]

Default value is : - https://www.googleapis.com/auth/spreadsheets

Default value is : - https://www.googleapis.com/auth/spreadsheets

Default:
[
  "https://www.googleapis.com/auth/spreadsheets"
]
serviceAccount string
store boolean

Default value is : true

Default: true
timeout string
format=duration
valueRender string

More details here

Default value is : UNFORMATTED_VALUE

Default: "UNFORMATTED_VALUE"
Values: "FORMATTED_VALUE" "UNFORMATTED_VALUE" "FORMULA"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.hightouch.Sync object
Examples
id: hightouch_sync
namespace: company.team

tasks:
  - id: sync
    type: io.kestra.plugin.hightouch.Sync
    token: YOUR_API_TOKEN
    syncId: 1127166

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
syncId integer required
token string required
type const: "io.kestra.plugin.hightouch.Sync" required
Constant: "io.kestra.plugin.hightouch.Sync"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
fullResynchronization boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

Default value is : 300.000000000

Default: 300.0
format=duration
timeout string
format=duration
wait boolean

Allowing to capture run status and logs

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.hubspot.tickets.Create object
Examples
id: hubspot_tickets_create
namespace: company.team

tasks:
  - id: create_ticket
    type: io.kestra.plugin.hubspot.tickets.Create
    apiKey: my_api_key
    subject: "Increased 5xx in Demo Service"
    content: "The number of 5xx has increased beyond the threshold for Demo service."
    stage: 3
    priority: HIGH

Create an issue when a Kestra workflow in any namespace with company as prefix fails.

id: create_ticket_on_failure
namespace: system

tasks:
  - id: create_ticket
    type: io.kestra.plugin.hubspot.tickets.Create
    apiKey: my_api_key
    subject: Workflow failed
    content: "{{ execution.id }} has failed on {{ taskrun.startDate }}"
    stage: 3
    priority: HIGH

triggers:
  - id: on_failure
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: company
        comparison: PREFIX

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.hubspot.tickets.Create" required
Constant: "io.kestra.plugin.hubspot.tickets.Create"
allowFailure boolean

Default value is : false

Default: false
apiKey string
content string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
oauthToken string
pipeline integer
priority string

(Optional) Available values: LOW: Low priority MEDIUM: Medium priority HIGH: High priority

Values: "LOW" "MEDIUM" "HIGH"
stage integer

Default value is : 1

Default: 1
subject string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.arrowflight.Query object
Examples

Send a SQL query to a database and fetch row(s) using Apache Arrow Flight SQL driver.

id: arrow_flight_sql_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.arrowflight.Query
    url: jdbc:arrow-flight-sql://localhost:31010/?useEncryption=false
    username: db_user
    password: db_password
    sql: select * FROM departments
    fetchType: FETCH

Send a SQL query to a Dremio coordinator and fetch rows as output using Apache Arrow Flight SQL driver.

id: arrow_flight_sql_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.arrowflight.Query
    url: jdbc:arrow-flight-sql://dremio-coordinator:32010/?schema=postgres.public
    username: dremio_user
    password: dremio_password
    sql: select * FROM departments
    fetchType: FETCH

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.arrowflight.Query" required
Constant: "io.kestra.plugin.jdbc.arrowflight.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.arrowflight.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.arrowflight.Trigger
    username: dremio_user
    password: dremio_password
    url: jdbc:arrow-flight-sql://dremio-coordinator:32010/?schema=postgres.public
    interval: "PT5M"
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.arrowflight.Trigger" required
Constant: "io.kestra.plugin.jdbc.arrowflight.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.as400.Query object
Examples

Send a SQL query to an AS400 database and fetch a row as output.

id: as400_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.as400.Query
    url: jdbc:as400://127.0.0.1:50000/
    username: as400_user
    password: as400_password
    sql: select * from as400_types
    fetchType: FETCH_ONE

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.as400.Query" required
Constant: "io.kestra.plugin.jdbc.as400.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode except when you are using store property in which case the auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.as400.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.as400.Trigger
    interval: "PT5M"
    url: jdbc:as400://127.0.0.1:50000/
    username: as400_user
    password: as400_password
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.as400.Trigger" required
Constant: "io.kestra.plugin.jdbc.as400.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.clickhouse.BulkInsert object
Examples

Insert rows from another table to a Clickhouse database using asynchronous inserts.

id: clickhouse_bulk_insert
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: bulk_insert
    type: io.kestra.plugin.jdbc.clickhouse.BulkInsert
    from: "{{ inputs.file }}"
    url: jdbc:clickhouse://127.0.0.1:56982/
    username: ch_user
    password: ch_password
    sql: INSERT INTO YourTable SETTINGS async_insert=1, wait_for_async_insert=1 values( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )

Insert data into specific columns via a SQL query to a ClickHouse database using asynchronous inserts.

id: clickhouse_bulk_insert
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: bulk_insert
    type: io.kestra.plugin.jdbc.clickhouse.BulkInsert
    from: "{{ inputs.file }}"
    url: jdbc:clickhouse://127.0.0.1:56982/
    username: ch_user
    password: ch_password
    sql: INSERT INTO YourTable ( field1, field2, field3 ) SETTINGS async_insert=1, wait_for_async_insert=1 values( ?, ?, ? )

Insert data into a ClickHouse table by specifying only the table name; the columns are retrieved automatically and the INSERT statement is generated for you.

id: clickhouse_bulk_insert
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: bulk_insert
    type: io.kestra.plugin.jdbc.clickhouse.BulkInsert
    from: "{{ inputs.file }}"
    url: jdbc:clickhouse://127.0.0.1:56982/
    username: ch_user
    password: ch_password
    table: YourTable

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
sql string required

The query must have as many question marks as the number of columns in the table. Example: 'insert into <table_name> values( ? , ? , ? )' for 3 columns. In case you do not want all columns, you need to specify it in the query in the columns property Example: 'insert into <table_name> (id, name) values( ? , ? )' for inserting data into 2 columns: 'id' and 'name'.

type const: "io.kestra.plugin.jdbc.clickhouse.BulkInsert" required
Constant: "io.kestra.plugin.jdbc.clickhouse.BulkInsert"
url string required
allowFailure boolean

Default value is : false

Default: false
chunk integer

Default value is : 1000

Default: 1000
columns string[]

If not provided, ? count need to match the from number of columns.

description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
table string

This property specifies the table name which will be used to retrieve the columns for the inserted values. You can use it instead of specifying manually the columns in the columns property. In this case, the sql property can also be omitted, an INSERT statement would be generated automatically.

timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.clickhouse.ClickHouseLocalCLI object
Examples

Run clickhouse-local commands

id: clickhouse-local
namespace: company.team
tasks:
  - id: query
    type: io.kestra.plugin.jdbc.clickhouse.ClickHouseLocalCLI
    commands:
      - SELECT count() FROM s3('https://datasets-documentation.s3.eu-west-3.amazonaws.com/house_parquet/house_0.parquet')

commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.clickhouse.ClickHouseLocalCLI" required
Constant: "io.kestra.plugin.jdbc.clickhouse.ClickHouseLocalCLI"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : clickhouse/clickhouse-server:latest

Default: "clickhouse/clickhouse-server:latest"
description string
disabled boolean

Default value is : false

Default: false
env object
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.clickhouse.Query object
Examples

Query a Clickhouse database.

id: clickhouse_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.clickhouse.Query
    url: jdbc:clickhouse://127.0.0.1:56982/
    username: ch_user
    password: ch_password
    sql: select * from clickhouse_types
    fetchType: STORE

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.clickhouse.Query" required
Constant: "io.kestra.plugin.jdbc.clickhouse.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode except when you are using store property in which case the auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.clickhouse.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.clickhouse.Trigger
    interval: "PT5M"
    url: jdbc:clickhouse://127.0.0.1:56982/
    username: ch_user
    password: ch_password
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.clickhouse.Trigger" required
Constant: "io.kestra.plugin.jdbc.clickhouse.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.db2.Query object
Examples

Send a SQL query to a DB2 Database and fetch a row as output.

id: db2_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.db2.Query
    url: jdbc:db2://127.0.0.1:50000/
    username: db2inst
    password: db2_password
    sql: select * from db2_types
    fetchType: FETCH_ONE

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.db2.Query" required
Constant: "io.kestra.plugin.jdbc.db2.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode except when you are using store property in which case the auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.db2.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.db2.Trigger
    interval: "PT5M"
    url: jdbc:db2://127.0.0.1:50000/
    username: db2inst
    password: db2_password
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.db2.Trigger" required
Constant: "io.kestra.plugin.jdbc.db2.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.dremio.Query object
Examples

Send a SQL query to a Dremio database and fetch a row as output.

id: dremio_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.dremio.Query
    url: jdbc:dremio:direct=sql.dremio.cloud:443;ssl=true;PROJECT_ID=sampleProjectId;
    username: dremio_token
    password: samplePersonalAccessToken
    sql: select * FROM source.database.table
    fetchType: FETCH_ONE

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.dremio.Query" required
Constant: "io.kestra.plugin.jdbc.dremio.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.dremio.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.dremio.Trigger
    interval: "PT5M"
    url: jdbc:dremio:direct=sql.dremio.cloud:443;ssl=true;PROJECT_ID=sampleProjectId;
    username: dremio_token
    password: samplePersonalAccessToken
    sql: "SELECT * FROM source.database.my_table"
    fetchType: FETCH

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.dremio.Trigger" required
Constant: "io.kestra.plugin.jdbc.dremio.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.druid.Query object
Examples

Query an Apache Druid database.

id: druid_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.druid.Query
    url: jdbc:avatica:remote:url=http://localhost:8888/druid/v2/sql/avatica/;transparent_reconnection=true
    sql: |
      SELECT *
      FROM wikiticker
    fetchType: STORE

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.druid.Query" required
Constant: "io.kestra.plugin.jdbc.druid.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.druid.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.druid.Trigger
    interval: "PT5M"
    url: jdbc:avatica:remote:url=http://localhost:8888/druid/v2/sql/avatica/;transparent_reconnection=true
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.druid.Trigger" required
Constant: "io.kestra.plugin.jdbc.druid.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.duckdb.Query object
Examples

Execute a query that reads a csv, and outputs another csv.

id: query_duckdb
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: "https://huggingface.co/datasets/kestra/datasets/raw/main/csv/orders.csv"

  - id: query
    type: io.kestra.plugin.jdbc.duckdb.Query
    url: 'jdbc:duckdb:'
    timeZoneId: Europe/Paris
    sql: |-
      CREATE TABLE new_tbl AS SELECT * FROM read_csv_auto('{{ workingDir }}/in.csv', header=True);

      COPY (SELECT order_id, customer_name FROM new_tbl) TO '{{ outputFiles.out }}' (HEADER, DELIMITER ',');
    inputFiles:
      in.csv: "{{ outputs.http_download.uri }}"
    outputFiles:
       - out

Execute a query that reads from an existing database file using a URL.

id: query_duckdb
namespace: company.team

tasks:
  - id: query1
    type: io.kestra.plugin.jdbc.duckdb.Query
    url: jdbc:duckdb:/{{ vars.dbfile }}
    sql: SELECT * FROM table_name;
    fetchType: STORE

  - id: query2
    type: io.kestra.plugin.jdbc.duckdb.Query
    url: jdbc:duckdb:/temp/folder/duck.db
    sql: SELECT * FROM table_name;
    fetchType: STORE

Execute a query that reads from an existing database file using the databaseFile variable.

id: query_duckdb
namespace: company.team

tasks:
  - id: query1
    type: io.kestra.plugin.jdbc.duckdb.Query
    url: jdbc:duckdb:
    databaseFile: "{{ vars.dbfile }}"
    sql: SELECT * FROM table_name;
    fetchType: STORE

  - id: query2
    type: io.kestra.plugin.jdbc.duckdb.Query
    url: jdbc:duckdb:
    databaseFile: /temp/folder/duck.db
    sql: SELECT * FROM table_name;
    fetchType: STORE

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.duckdb.Query" required
Constant: "io.kestra.plugin.jdbc.duckdb.Query"
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode, except when you are using the store property, in which case auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
inputFiles Record<string, string>

Describe a files map that will be written and usable by DuckDb. You can reach files using a workingDir variable, example: SELECT * FROM read_csv_auto('{{ workingDir }}/myfile.csv');

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
outputFiles string[]

List of keys that will generate temporary files. In the SQL query, you can just use a variable named outputFiles.key for the corresponding file. If you add a file with ["first"], you can use the special vars COPY tbl TO '{{ outputFiles.first }}' (HEADER, DELIMITER ','); and use this file in other tasks using {{ outputs.taskId.outputFiles.first }}.

password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
url string

The default value, jdbc:duckdb:, will use a local in-memory database. Set this property when connecting to a persisted database instance, for example jdbc:duckdb:md:my_database?motherduck_token=<my_token> to connect to MotherDuck.

Default value is : "jdbc:duckdb:"

Default: "jdbc:duckdb:"
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.duckdb.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.duckdb.Trigger
    interval: "PT5M"
    url: 'jdbc:duckdb:'
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.duckdb.Trigger" required
Constant: "io.kestra.plugin.jdbc.duckdb.Trigger"
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between 2 different polls of schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval must be at least PT30S. See ISO 8601 Durations for more information about available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
url string

Default value is : jdbc:duckdb:null

Default: "jdbc:duckdb:null"
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.mysql.Batch object
Examples

Fetch rows from a table, and bulk insert them to another one.

id: mysql_batch
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.mysql.Query
    url: jdbc:mysql://127.0.0.1:3306/
    username: mysql_user
    password: mysql_password
    sql: |
      SELECT *
      FROM xref
      LIMIT 1500;
    fetchType: STORE

  - id: update
    type: io.kestra.plugin.jdbc.mysql.Batch
    from: "{{ outputs.query.uri }}"
    url: jdbc:mysql://127.0.0.1:3306/
    username: mysql_user
    password: mysql_password
    sql: |
      insert into xref values( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )

Fetch rows from a table, and bulk insert them to another one, without using sql query.

id: mysql_batch
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.mysql.Query
    url: jdbc:mysql://127.0.0.1:3306/
    username: mysql_user
    password: mysql_password
    sql: |
      SELECT *
      FROM xref
      LIMIT 1500;
    fetchType: STORE

  - id: update
    type: io.kestra.plugin.jdbc.mysql.Batch
    from: "{{ outputs.query.uri }}"
    url: jdbc:mysql://127.0.0.1:3306/
    username: mysql_user
    password: mysql_password
    table: xref

from string required
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
sql string required

The query must have as many question marks as the number of columns in the table. Example: 'insert into <table_name> values( ? , ? , ? )' for 3 columns. In case you do not want all columns, you need to specify them in the query and in the columns property. Example: 'insert into <table_name> (id, name) values( ? , ? )' for inserting data into 2 columns: 'id' and 'name'.

type const: "io.kestra.plugin.jdbc.mysql.Batch" required
Constant: "io.kestra.plugin.jdbc.mysql.Batch"
url string required
allowFailure boolean

Default value is : false

Default: false
chunk integer

Default value is : 1000

Default: 1000
columns string[]

If not provided, the ? count needs to match the number of columns in the from data.

description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
table string

This property specifies the table name which will be used to retrieve the columns for the inserted values. You can use it instead of specifying manually the columns in the columns property. In this case, the sql property can also be omitted, an INSERT statement would be generated automatically.

timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.mysql.Query object
Examples

Send a SQL query to a MySQL Database and fetch a row as output.

id: mysql_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.mysql.Query
    url: jdbc:mysql://127.0.0.1:3306/
    username: mysql_user
    password: mysql_password
    sql: select * from mysql_types
    fetchType: FETCH_ONE

Load a csv file into a MySQL table.

id: mysql_query
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/products.csv

  - id: query
    type: io.kestra.plugin.jdbc.mysql.Query
    url: jdbc:mysql://127.0.0.1:3306/
    username: mysql_user
    password: mysql_password
    inputFile: "{{ outputs.http_download.uri }}"
    sql: |
      LOAD DATA LOCAL INFILE '{{ inputFile }}'
      INTO TABLE products
      FIELDS TERMINATED BY ','
      ENCLOSED BY '"'
      LINES TERMINATED BY '\n'
      IGNORE 1 ROWS;

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.mysql.Query" required
Constant: "io.kestra.plugin.jdbc.mysql.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode, except when you are using the store property, in which case auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
inputFile string

The file must be from Kestra's internal storage

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.mysql.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.mysql.Trigger
    interval: "PT5M"
    url: jdbc:mysql://127.0.0.1:3306/
    username: mysql_user
    password: mysql_password
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.mysql.Trigger" required
Constant: "io.kestra.plugin.jdbc.mysql.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between 2 different polls of schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval must be at least PT30S. See ISO 8601 Durations for more information about available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.oracle.Batch object
Examples

Fetch rows from a table and bulk insert to another one

id: oracle_batch
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.oracle.Query
    url: jdbc:oracle:thin:@dev:49161:XE
    username: oracle
    password: oracle_password
    sql: |
      SELECT *
      FROM xref
      LIMIT 1500;
    fetchType: STORE

  - id: update
    type: io.kestra.plugin.jdbc.oracle.Batch
    from: "{{ outputs.query.uri }}"
    url: jdbc:oracle:thin:@prod:49161:XE
    username: oracle
    password: oracle_password
    sql: |
      insert into xref values( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )

Fetch rows from a table and bulk insert to another one, without using sql query

id: oracle_batch
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.oracle.Query
    url: jdbc:oracle:thin:@dev:49161:XE
    username: oracle
    password: oracle_password
    sql: |
      SELECT *
      FROM xref
      LIMIT 1500;
    fetchType: STORE

  - id: update
    type: io.kestra.plugin.jdbc.oracle.Batch
    from: "{{ outputs.query.uri }}"
    url: jdbc:oracle:thin:@prod:49161:XE
    username: oracle
    password: oracle_password
    table: XREF

from string required
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
sql string required

The query must have as many question marks as the number of columns in the table. Example: 'insert into <table_name> values( ? , ? , ? )' for 3 columns. In case you do not want all columns, you need to specify them in the query and in the columns property. Example: 'insert into <table_name> (id, name) values( ? , ? )' for inserting data into 2 columns: 'id' and 'name'.

type const: "io.kestra.plugin.jdbc.oracle.Batch" required
Constant: "io.kestra.plugin.jdbc.oracle.Batch"
url string required
allowFailure boolean

Default value is : false

Default: false
chunk integer

Default value is : 1000

Default: 1000
columns string[]

If not provided, the ? count needs to match the number of columns in the from data.

description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
table string

This property specifies the table name which will be used to retrieve the columns for the inserted values. You can use it instead of specifying manually the columns in the columns property. In this case, the sql property can also be omitted, an INSERT statement would be generated automatically.

timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.oracle.Query object
Examples

Execute a query and fetch results on another task to update another table.

id: oracle_query
namespace: company.team

tasks:
  - id: select
    type: io.kestra.plugin.jdbc.oracle.Query
    url: jdbc:oracle:thin:@localhost:49161:XE
    username: oracle_user
    password: oracle_password
    sql: select * from source
    fetchType: FETCH

  - id: generate_update
    type: io.kestra.plugin.jdbc.oracle.Query
    url: jdbc:oracle:thin:@localhost:49161:XE
    username: oracle_user
    password: oracle_password
    sql: "{% for row in outputs.select.rows %} INSERT INTO destination (year_month, store_code, update_date) values ({{ row.year_month }}, {{ row.store_code }}, TO_DATE('{{ row.date }}', 'MONTH DD, YYYY') ); {% endfor %}"

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.oracle.Query" required
Constant: "io.kestra.plugin.jdbc.oracle.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode, except when you are using the store property, in which case auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.oracle.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.oracle.Trigger
    interval: "PT5M"
    url: jdbc:oracle:thin:@localhost:49161:XE
    username: oracle_user
    password: oracle_password
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.oracle.Trigger" required
Constant: "io.kestra.plugin.jdbc.oracle.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between 2 different polls of schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval must be at least PT30S. See ISO 8601 Durations for more information about available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.pinot.Query object
Examples
id: pinot_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.pinot.Query
    url: jdbc:pinot://localhost:9000
    sql: |
      SELECT *
      FROM airlineStats
    fetchType: FETCH

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.pinot.Query" required
Constant: "io.kestra.plugin.jdbc.pinot.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.pinot.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.pinot.Trigger
    interval: "PT5M"
    url: jdbc:pinot://localhost:9000
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.pinot.Trigger" required
Constant: "io.kestra.plugin.jdbc.pinot.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between 2 different polls of schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval must be at least PT30S. See ISO 8601 Durations for more information about available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.postgresql.Batch object
Examples

Fetch rows from a table, and bulk insert them to another one.

id: postgres_bulk_insert
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.postgresql.Query
    url: jdbc:postgresql://dev:56982/
    username: pg_user
    password: pg_password
    sql: |
      SELECT *
      FROM xref
      LIMIT 1500;
    fetchType: STORE

  - id: update
    type: io.kestra.plugin.jdbc.postgresql.Batch
    from: "{{ outputs.query.uri }}"
    url: jdbc:postgresql://prod:56982/
    username: pg_user
    password: pg_password
    sql: |
      insert into xref values( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )

Fetch rows from a table, and bulk insert them to another one, without using sql query.

id: postgres_bulk_insert
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.postgresql.Query
    url: jdbc:postgresql://dev:56982/
    username: pg_user
    password: pg_password
    sql: |
      SELECT *
      FROM xref
      LIMIT 1500;
    fetchType: STORE

  - id: update
    type: io.kestra.plugin.jdbc.postgresql.Batch
    from: "{{ outputs.query.uri }}"
    url: jdbc:postgresql://prod:56982/
    username: pg_user
    password: pg_password
    table: xref

from string required
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
sql string required

The query must have as many question marks as the number of columns in the table. Example: 'insert into <table_name> values( ? , ? , ? )' for 3 columns. In case you do not want all columns, you need to specify them in the query and in the columns property. Example: 'insert into <table_name> (id, name) values( ? , ? )' for inserting data into 2 columns: 'id' and 'name'.

type const: "io.kestra.plugin.jdbc.postgresql.Batch" required
Constant: "io.kestra.plugin.jdbc.postgresql.Batch"
url string required
allowFailure boolean

Default value is : false

Default: false
chunk integer

Default value is : 1000

Default: 1000
columns string[]

If not provided, the ? count needs to match the number of columns in the from data.

description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
ssl boolean

Default value is : false

Default: false
sslCert string

Must be a PEM encoded certificate

sslKey string

Must be a PEM encoded key

sslKeyPassword string
sslMode string
Values: "DISABLE" "ALLOW" "PREFER" "REQUIRE" "VERIFY_CA" "VERIFY_FULL"
sslRootCert string

Must be a PEM encoded certificate

table string

This property specifies the table name which will be used to retrieve the columns for the inserted values. You can use it instead of specifying manually the columns in the columns property. In this case, the sql property can also be omitted, an INSERT statement would be generated automatically.

timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.postgresql.CopyIn object

Copies in CSV, Text, or Binary data into a PostgreSQL table.

Examples

Load CSV data into a PostgreSQL table.

id: postgres_copy_in
namespace: company.team

tasks:
  - id: copy_in
    type: io.kestra.plugin.jdbc.postgresql.CopyIn
    url: jdbc:postgresql://127.0.0.1:56982/
    username: pg_user
    password: pg_password
    format: CSV
    from: "{{ outputs.export.uri }}"
    table: my_destination_table
    header: true
    delimiter: "\t"

from string required
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.postgresql.CopyIn" required
Constant: "io.kestra.plugin.jdbc.postgresql.CopyIn"
url string required
allowFailure boolean

Default value is : false

Default: false
columns string[]

If no column list is specified, all columns of the table will be copied.

delimiter string

The default is a tab character in text format, a comma in CSV format. This must be a single one-byte character. This option is not allowed when using binary.

description string
disabled boolean

Default value is : false

Default: false
encoding string

If this option is omitted, the current client encoding is used. See the Notes below for more details.

escape string

The default is the same as the QUOTE value (so that the quoting character is doubled if it appears in the data). This must be a single one-byte character. This option is allowed only when using CSV format.

forceNotNull string[]

In the default case where the null string is empty, this means that empty values will be read as zero-length strings rather than nulls, even when they are not quoted. This option is allowed only in COPY FROM, and only when using CSV format.

forceNull string[]

In the default case where the null string is empty, this converts a quoted empty string into NULL. This option is allowed only in COPY FROM, and only when using CSV format.

forceQuote string[]

NULL output is never quoted. If * is specified, non-NULL values will be quoted in all columns. This option is allowed only in COPY TO, and only when using CSV format.

format string

Default value is : TEXT

Default: "TEXT"
Values: "TEXT" "CSV" "BINARY"
freeze boolean

This is intended as a performance option for initial data loading. Rows will be frozen only if the table being loaded has been created or truncated in the current sub-transaction, there are no cursors open and there are no older snapshots held by this transaction. It is currently not possible to perform a COPY FREEZE on a partitioned table.

Note that all other sessions will immediately be able to see the data once it has been successfully loaded. This violates the normal rules of MVCC visibility and users specifying should be aware of the potential problems this might cause.

header boolean

On output, the first line contains the column names from the table, and on input, the first line is ignored. This option is allowed only when using CSV.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
nullString string

The default is \N (backslash-N) in text format, and an unquoted empty string in CSV format. You might prefer an empty string even in text format for cases where you don't want to distinguish nulls from empty strings. This option is not allowed when using binary format.

oids boolean

An error is raised if OIDs is specified for a table that does not have OIDs, or in the case of copying a query.

password string
quote string

The default is double-quote. This must be a single one-byte character. This option is allowed only when using CSV format.

ssl boolean

Default value is : false

Default: false
sslCert string

Must be a PEM encoded certificate

sslKey string

Must be a PEM encoded key

sslKeyPassword string
sslMode string
Values: "DISABLE" "ALLOW" "PREFER" "REQUIRE" "VERIFY_CA" "VERIFY_FULL"
sslRootCert string

Must be a PEM encoded certificate

table string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.postgresql.CopyOut object
Examples

Export a PostgreSQL table or query to a CSV or TSV file.

id: postgres_copy_out
namespace: company.team

tasks:
  - id: copy_out
    type: io.kestra.plugin.jdbc.postgresql.CopyOut
    url: jdbc:postgresql://127.0.0.1:56982/
    username: pg_user
    password: pg_password
    format: CSV
    sql: SELECT 1 AS int, 't'::bool AS bool UNION SELECT 2 AS int, 'f'::bool AS bool
    header: true
    delimiter: "\t"

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.postgresql.CopyOut" required
Constant: "io.kestra.plugin.jdbc.postgresql.CopyOut"
url string required
allowFailure boolean

Default value is : false

Default: false
columns string[]

If no column list is specified, all columns of the table will be copied.

delimiter string

The default is a tab character in text format, a comma in CSV format. This must be a single one-byte character. This option is not allowed when using binary.

description string
disabled boolean

Default value is : false

Default: false
encoding string

If this option is omitted, the current client encoding is used. See the Notes below for more details.

escape string

The default is the same as the QUOTE value (so that the quoting character is doubled if it appears in the data). This must be a single one-byte character. This option is allowed only when using CSV format.

forceNotNull string[]

In the default case where the null string is empty, this means that empty values will be read as zero-length strings rather than nulls, even when they are not quoted. This option is allowed only in COPY FROM, and only when using CSV format.

forceNull string[]

In the default case where the null string is empty, this converts a quoted empty string into NULL. This option is allowed only in COPY FROM, and only when using CSV format.

forceQuote string[]

NULL output is never quoted. If * is specified, non-NULL values will be quoted in all columns. This option is allowed only in COPY TO, and only when using CSV format.

format string

Default value is : TEXT

Default: "TEXT"
Values: "TEXT" "CSV" "BINARY"
freeze boolean

This is intended as a performance option for initial data loading. Rows will be frozen only if the table being loaded has been created or truncated in the current sub-transaction, there are no cursors open and there are no older snapshots held by this transaction. It is currently not possible to perform a COPY FREEZE on a partitioned table.

Note that all other sessions will immediately be able to see the data once it has been successfully loaded. This violates the normal rules of MVCC visibility, and users specifying this option should be aware of the potential problems this might cause.

header boolean

On output, the first line contains the column names from the table, and on input, the first line is ignored. This option is allowed only when using CSV.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
nullString string

The default is \N (backslash-N) in text format, and an unquoted empty string in CSV format. You might prefer an empty string even in text format for cases where you don't want to distinguish nulls from empty strings. This option is not allowed when using binary format.

oids boolean

An error is raised if OIDs is specified for a table that does not have OIDs, or in the case of copying a query.

password string
quote string

The default is double-quote. This must be a single one-byte character. This option is allowed only when using CSV format.

sql string

For INSERT, UPDATE and DELETE queries a RETURNING clause must be provided, and the target relation must not have a conditional rule, nor an ALSO rule, nor an INSTEAD rule that expands to multiple statements.

ssl boolean

Default value is : false

Default: false
sslCert string

Must be a PEM encoded certificate

sslKey string

Must be a PEM encoded key

sslKeyPassword string
sslMode string
Values: "DISABLE" "ALLOW" "PREFER" "REQUIRE" "VERIFY_CA" "VERIFY_FULL"
sslRootCert string

Must be a PEM encoded certificate

table string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.postgresql.Query object
Examples

Execute a query and fetch results in a task, and update another table with fetched results in a different task.

id: postgres_query
namespace: company.team

tasks:
  - id: fetch
    type: io.kestra.plugin.jdbc.postgresql.Query
    url: jdbc:postgresql://127.0.0.1:56982/
    username: pg_user
    password: pg_password
    sql: select concert_id, available, a, b, c, d, play_time, library_record, floatn_test, double_test, real_test, numeric_test, date_type, time_type, timez_type, timestamp_type, timestampz_type, interval_type, pay_by_quarter, schedule, json_type, blob_type from pgsql_types
    fetchType: FETCH

  - id: use_fetched_data
    type: io.kestra.plugin.jdbc.postgresql.Query
    url: jdbc:postgresql://127.0.0.1:56982/
    username: pg_user
    password: pg_password
    sql:  "{% for row in outputs.fetch.rows %} INSERT INTO pl_store_distribute (year_month,store_code, update_date) values ({{row.play_time}}, {{row.concert_id}}, TO_TIMESTAMP('{{row.timestamp_type}}', 'YYYY-MM-DDTHH:MI:SS.US') ); {% endfor %}"

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.postgresql.Query" required
Constant: "io.kestra.plugin.jdbc.postgresql.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode except when you are using store property in which case the auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
ssl boolean

Default value is : false

Default: false
sslCert string

Must be a PEM encoded certificate

sslKey string

Must be a PEM encoded key

sslKeyPassword string
sslMode string
Values: "DISABLE" "ALLOW" "PREFER" "REQUIRE" "VERIFY_CA" "VERIFY_FULL"
sslRootCert string

Must be a PEM encoded certificate

store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.postgresql.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.postgresql.Trigger
    interval: "PT5M"
    url: jdbc:postgresql://127.0.0.1:56982/
    username: pg_user
    password: pg_password
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.postgresql.Trigger" required
Constant: "io.kestra.plugin.jdbc.postgresql.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between two consecutive polls of the schedule; this can help avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the interval should be at least PT30S. See ISO 8601 durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
ssl boolean

Default value is : false

Default: false
sslCert string

Must be a PEM encoded certificate

sslKey string

Must be a PEM encoded key

sslKeyPassword string
sslMode string
Values: "DISABLE" "ALLOW" "PREFER" "REQUIRE" "VERIFY_CA" "VERIFY_FULL"
sslRootCert string

Must be a PEM encoded certificate

stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.redshift.Query object
Examples

Send a SQL query to a Redshift database and fetch a row as output.

id: redshift_query
namespace: company.team

tasks:
  - id: select
    type: io.kestra.plugin.jdbc.redshift.Query
    url: jdbc:redshift://123456789.eu-central-1.redshift-serverless.amazonaws.com:5439/dev
    username: admin
    password: admin_password
    sql: select * from redshift_types
    fetchType: FETCH_ONE

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.redshift.Query" required
Constant: "io.kestra.plugin.jdbc.redshift.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode except when you are using store property in which case the auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.redshift.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.redshift.Trigger
    interval: "PT5M"
    url: jdbc:redshift://123456789.eu-central-1.redshift-serverless.amazonaws.com:5439/dev
    username: admin
    password: admin_password
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.redshift.Trigger" required
Constant: "io.kestra.plugin.jdbc.redshift.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between two consecutive polls of the schedule; this can help avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the interval should be at least PT30S. See ISO 8601 durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.snowflake.Download object
Examples
id: snowflake_download
namespace: company.team

tasks:
  - id: download
    type: io.kestra.plugin.jdbc.snowflake.Download
    url: jdbc:snowflake://<account_identifier>.snowflakecomputing.com
    username: snowflake_user
    password: snowflake_password
    stageName: "@demo_db.public.%myStage"
    fileName: prefix/destFile.csv

fileName string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
stageName string required

Either ~ (the user stage), a table name, or a stage name.

type const: "io.kestra.plugin.jdbc.snowflake.Download" required
Constant: "io.kestra.plugin.jdbc.snowflake.Download"
url string required
allowFailure boolean

Default value is : false

Default: false
compress boolean

Default value is : true

Default: true
database string

The specified database should be an existing database for which the specified default role has privileges. If you need to use a different database after connecting, execute the USE DATABASE command.

description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
privateKey string

It needs to be an un-encoded private key in plaintext.

privateKeyFile string

It needs to be the path on the host where the private key file is located.

privateKeyFilePassword string
role string

The specified role should be an existing role that has already been assigned to the specified user for the driver. If the specified role has not already been assigned to the user, the role is not used when the session is initiated by the driver. If you need to use a different role after connecting, execute the USE ROLE command.

schema string

The specified schema should be an existing schema for which the specified default role has privileges. If you need to use a different schema after connecting, execute the USE SCHEMA command.

timeout string
format=duration
username string
warehouse string

The specified warehouse should be an existing warehouse for which the specified default role has privileges. If you need to use a different warehouse after connecting, execute the USE WAREHOUSE command to set a different warehouse for the session.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.snowflake.Query object
Examples

Execute a query and fetch results in a task, and update another table with fetched results in a different task.

id: snowflake_query
namespace: company.team

tasks:
  - id: select
    type: io.kestra.plugin.jdbc.snowflake.Query
    url: jdbc:snowflake://<account_identifier>.snowflakecomputing.com
    username: snowflake_user
    password: snowflake_password
    sql: select * from demo_db.public.customers
    fetchType: FETCH

  - id: generate_update
    type: io.kestra.plugin.jdbc.snowflake.Query
    url: jdbc:snowflake://<account_identifier>.snowflakecomputing.com
    username: snowflake_user
    password: snowflake_password
    sql: "INSERT INTO demo_db.public.customers_new (year_month, store_code, update_date) values {% for row in outputs.update.rows %} ({{ row.year_month }}, {{ row.store_code }}, TO_DATE('{{ row.date }}', 'MONTH DD, YYYY') ) {% if not loop.last %}, {% endif %}; {% endfor %}"

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.snowflake.Query" required
Constant: "io.kestra.plugin.jdbc.snowflake.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode except when you are using store property in which case the auto-commit will be disabled.

Default value is : true

Default: true
database string

The specified database should be an existing database for which the specified default role has privileges. If you need to use a different database after connecting, execute the USE DATABASE command.

description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
privateKey string

It needs to be an un-encoded private key in plaintext.

privateKeyFile string

It needs to be the path on the host where the private key file is located.

privateKeyFilePassword string
role string

The specified role should be an existing role that has already been assigned to the specified user for the driver. If the specified role has not already been assigned to the user, the role is not used when the session is initiated by the driver. If you need to use a different role after connecting, execute the USE ROLE command.

schema string

The specified schema should be an existing schema for which the specified default role has privileges. If you need to use a different schema after connecting, execute the USE SCHEMA command.

sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
warehouse string

The specified warehouse should be an existing warehouse for which the specified default role has privileges. If you need to use a different warehouse after connecting, execute the USE WAREHOUSE command to set a different warehouse for the session.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.snowflake.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.snowflake.Trigger
    interval: "PT5M"
    url: jdbc:snowflake://<account_identifier>.snowflakecomputing.com
    username: snowflake_user
    password: snowflake_password
    sql: "SELECT * FROM demo_db.public.customers"
    fetchType: FETCH

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.snowflake.Trigger" required
Constant: "io.kestra.plugin.jdbc.snowflake.Trigger"
url string required
conditions array
database string

The specified database should be an existing database for which the specified default role has privileges. If you need to use a different database after connecting, execute the USE DATABASE command.

description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between two consecutive polls of the schedule; this can help avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the interval should be at least PT30S. See ISO 8601 durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
privateKey string

It needs to be an un-encoded private key in plaintext.

privateKeyFile string

It needs to be the path on the host where the private key file is located.

privateKeyFilePassword string
role string

The specified role should be an existing role that has already been assigned to the specified user for the driver. If the specified role has not already been assigned to the user, the role is not used when the session is initiated by the driver. If you need to use a different role after connecting, execute the USE ROLE command.

schema string

The specified schema should be an existing schema for which the specified default role has privileges. If you need to use a different schema after connecting, execute the USE SCHEMA command.

sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
warehouse string

The specified warehouse should be an existing warehouse for which the specified default role has privileges. If you need to use a different warehouse after connecting, execute the USE WAREHOUSE command to set a different warehouse for the session.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.snowflake.Upload object
Examples
id: snowflake_upload
namespace: company.team

tasks:
  - id: upload
    type: io.kestra.plugin.jdbc.snowflake.Upload
    url: jdbc:snowflake://<account_identifier>.snowflakecomputing.com
    username: snowflake_user
    password: snowflake_password
    from: '{{ outputs.extract.uri }}'
    fileName: data.csv
    prefix: raw
    stageName: "@demo_db.public.%myStage"

fileName string required
from string required
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
prefix string required
stageName string required

This can either be a stage name or a table name.

type const: "io.kestra.plugin.jdbc.snowflake.Upload" required
Constant: "io.kestra.plugin.jdbc.snowflake.Upload"
url string required
allowFailure boolean

Default value is : false

Default: false
compress boolean

Default value is : true

Default: true
database string

The specified database should be an existing database for which the specified default role has privileges. If you need to use a different database after connecting, execute the USE DATABASE command.

description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
privateKey string

It needs to be an un-encoded private key in plaintext.

privateKeyFile string

It needs to be the path on the host where the private key file is located.

privateKeyFilePassword string
role string

The specified role should be an existing role that has already been assigned to the specified user for the driver. If the specified role has not already been assigned to the user, the role is not used when the session is initiated by the driver. If you need to use a different role after connecting, execute the USE ROLE command.

schema string

The specified schema should be an existing schema for which the specified default role has privileges. If you need to use a different schema after connecting, execute the USE SCHEMA command.

timeout string
format=duration
username string
warehouse string

The specified warehouse should be an existing warehouse for which the specified default role has privileges. If you need to use a different warehouse after connecting, execute the USE WAREHOUSE command to set a different warehouse for the session.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.sqlite.Query object
Examples

Execute a query and pass the results to another task.

id: sqlite_query
namespace: company.team

tasks:
  - id: update
    type: io.kestra.plugin.jdbc.sqlite.Query
    url: jdbc:sqlite:myfile.db
    sql: select concert_id, available, a, b, c, d, play_time, library_record, floatn_test, double_test, real_test, numeric_test, date_type, time_type, timez_type, timestamp_type, timestampz_type, interval_type, pay_by_quarter, schedule, json_type, blob_type from pgsql_types
    fetchType: FETCH

  - id: use_fetched_data
    type: io.kestra.plugin.jdbc.sqlite.Query
    url: jdbc:sqlite:myfile.db
    sql: "{% for row in outputs.update.rows %} INSERT INTO pl_store_distribute (year_month,store_code, update_date) values ({{row.play_time}}, {{row.concert_id}}, TO_TIMESTAMP('{{row.timestamp_type}}', 'YYYY-MM-DDTHH:MI:SS.US') ); {% endfor %}"

Execute a query using an existing SQLite file, and pass the results to another task.

id: sqlite_query_using_file
namespace: company.team

tasks:
  - id: update
    type: io.kestra.plugin.jdbc.sqlite.Query
    url: jdbc:sqlite:myfile.db
    sqliteFile: "{{ outputs.get.outputFiles['myfile.sqlite'] }}"
    sql: select * from pgsql_types
    fetchType: FETCH

  - id: use_fetched_data
    type: io.kestra.plugin.jdbc.sqlite.Query
    url: jdbc:sqlite:myfile.db
    sqliteFile: "{{ outputs.get.outputFiles['myfile.sqlite'] }}"
    sql: "{% for row in outputs.update.rows %} INSERT INTO pl_store_distribute (year_month,store_code, update_date) values ({{row.play_time}}, {{row.concert_id}}, TO_TIMESTAMP('{{row.timestamp_type}}', 'YYYY-MM-DDTHH:MI:SS.US') ); {% endfor %}"

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.sqlite.Query" required
Constant: "io.kestra.plugin.jdbc.sqlite.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode except when you are using store property in which case the auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
sqliteFile string

The file must be from Kestra's internal storage

store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.sqlite.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.sqlite.Trigger
    interval: "PT5M"
    url: jdbc:sqlite:myfile.db
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.sqlite.Trigger" required
Constant: "io.kestra.plugin.jdbc.sqlite.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between two consecutive polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval must be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.sqlserver.Batch object
Examples

Fetch rows from a table and bulk insert to another one.

id: sqlserver_batch_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.sqlserver.Query
    url: jdbc:sqlserver://dev:41433;trustServerCertificate=true
    username: sql_server_user
    password: sql_server_password
    sql: |
      SELECT TOP 1500 *
      FROM xref;
    fetchType: STORE

  - id: update
    type: io.kestra.plugin.jdbc.sqlserver.Batch
    from: "{{ outputs.query.uri }}"
    url: jdbc:sqlserver://prod:41433;trustServerCertificate=true
    username: sql_server_user
    password: sql_server_password
    sql: |
      insert into xref values( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )

Fetch rows from a table and bulk insert to another one, without using sql query.

id: sqlserver_batch_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.sqlserver.Query
    url: jdbc:sqlserver://dev:41433;trustServerCertificate=true
    username: sql_server_user
    password: sql_server_passwd
    sql: |
      SELECT TOP 1500 *
      FROM xref;
    fetchType: STORE

  - id: update
    type: io.kestra.plugin.jdbc.sqlserver.Batch
    from: "{{ outputs.query.uri }}"
    url: jdbc:sqlserver://prod:41433;trustServerCertificate=true
    username: sql_server_user
    password: sql_server_passwd
    table: xref
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
sql string required

The query must have as many question marks as the number of columns in the table. Example: 'insert into <table_name> values( ? , ? , ? )' for 3 columns. In case you do not want all columns, you need to specify it in the query in the columns property Example: 'insert into <table_name> (id, name) values( ? , ? )' for inserting data into 2 columns: 'id' and 'name'.

type const: "io.kestra.plugin.jdbc.sqlserver.Batch" required
Constant: "io.kestra.plugin.jdbc.sqlserver.Batch"
url string required
allowFailure boolean

Default value is : false

Default: false
chunk integer

Default value is : 1000

Default: 1000
columns string[]

If not provided, the number of question marks (?) in the sql query must match the number of columns in the from data.

description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
table string

This property specifies the table name which will be used to retrieve the columns for the inserted values. You can use it instead of specifying manually the columns in the columns property. In this case, the sql property can also be omitted, an INSERT statement would be generated automatically.

timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.sqlserver.Query object
Examples

Execute a query and fetch results in a task, and update another table with fetched results in a different task.

id: sqlserver_query
namespace: company.team

tasks:
  - id: select
    type: io.kestra.plugin.jdbc.sqlserver.Query
    url: jdbc:sqlserver://localhost:41433;trustServerCertificate=true
    username: sql_server_user
    password: sql_server_password
    sql: select * from source
    fetchType: FETCH

  - id: generate_update
    type: io.kestra.plugin.jdbc.sqlserver.Query
    url: jdbc:sqlserver://localhost:41433;trustServerCertificate=true
    username: sql_server_user
    password: sql_server_password
    sql: "{% for row in outputs.select.rows %} INSERT INTO destination (year_month, store_code, update_date) values ({{row.year_month}}, {{row.store_code}}, '{{row.date}}'); {% endfor %}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.sqlserver.Query" required
Constant: "io.kestra.plugin.jdbc.sqlserver.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode, except when you are using the store property, in which case auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.sqlserver.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.sqlserver.Trigger
    interval: "PT5M"
    url: jdbc:sqlserver://localhost:41433;trustServerCertificate=true
    username: sql_server_user
    password: sql_server_password
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.sqlserver.Trigger" required
Constant: "io.kestra.plugin.jdbc.sqlserver.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between two consecutive polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval must be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.sybase.Query object
Examples

Send a SQL query to a Sybase Database and fetch a row as output.

id: sybase_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.sybase.Query
    url: jdbc:sybase:Tds:127.0.0.1:5000/
    username: syb_user
    password: syb_password
    sql: select * from syb_types
    fetchType: FETCH_ONE

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.sybase.Query" required
Constant: "io.kestra.plugin.jdbc.sybase.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode, except when you are using the store property, in which case auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.sybase.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.sybase.Trigger
    interval: "PT5M"
    url: jdbc:sybase:Tds:127.0.0.1:5000/
    username: syb_user
    password: syb_password
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.sybase.Trigger" required
Constant: "io.kestra.plugin.jdbc.sybase.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between two consecutive polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval must be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.trino.Query object
Examples

Execute a query and fetch results to pass it to downstream tasks.

id: trino_query
namespace: company.team

tasks:
  - id: analyze_orders
    type: io.kestra.plugin.jdbc.trino.Query
    url: jdbc:trino://localhost:8080/tpch
    username: trino_user
    password: trino_password
    sql: |
      select orderpriority as priority, sum(totalprice) as total
      from tpch.tiny.orders
      group by orderpriority
      order by orderpriority
    fetchType: STORE

  - id: csv_report
    type: io.kestra.plugin.serdes.csv.IonToCsv
    from: "{{ outputs.analyze_orders.uri }}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.trino.Query" required
Constant: "io.kestra.plugin.jdbc.trino.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode, except when you are using the store property, in which case auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.trino.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.trino.Trigger
    interval: "PT5M"
    url: jdbc:trino://localhost:8080/tpch
    username: trino_user
    password: trino_password
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.trino.Trigger" required
Constant: "io.kestra.plugin.jdbc.trino.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between two consecutive polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval must be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.vectorwise.Batch object
Examples

Fetch rows from a table and bulk insert to another one.

id: vectorwise_batch_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.vectorwise.Query
    url: jdbc:vectorwise://dev:port/base
    username: admin
    password: admin_password
    sql: |
      SELECT *
      FROM xref
      LIMIT 1500;
    fetchType: STORE

  - id: update
    type: io.kestra.plugin.jdbc.vectorwise.Batch
    from: "{{ outputs.query.uri }}"
    url: jdbc:vectorwise://prod:port/base
    username: admin
    password: admin_password
    sql: insert into xref values( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )

Fetch rows from a table and bulk insert to another one without using sql query.

id: vectorwise_batch_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.vectorwise.Query
    url: jdbc:vectorwise://dev:port/base
    username: admin
    password: admin_passwd
    sql: |
      SELECT *
      FROM xref
      LIMIT 1500;
    fetchType: STORE

  - id: update
    type: io.kestra.plugin.jdbc.vectorwise.Batch
    from: "{{ outputs.query.uri }}"
    url: jdbc:vectorwise://prod:port/base
    username: admin
    password: admin_passwd
    table: xref
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
sql string required

The query must have as many question marks as the number of columns in the table. Example: 'insert into <table_name> values( ? , ? , ? )' for 3 columns. In case you do not want all columns, you need to specify it in the query in the columns property Example: 'insert into <table_name> (id, name) values( ? , ? )' for inserting data into 2 columns: 'id' and 'name'.

type const: "io.kestra.plugin.jdbc.vectorwise.Batch" required
Constant: "io.kestra.plugin.jdbc.vectorwise.Batch"
url string required
allowFailure boolean

Default value is : false

Default: false
chunk integer

Default value is : 1000

Default: 1000
columns string[]

If not provided, the number of question marks (?) in the sql query must match the number of columns in the from data.

description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
table string

This property specifies the table name which will be used to retrieve the columns for the inserted values. You can use it instead of specifying manually the columns in the columns property. In this case, the sql property can also be omitted, an INSERT statement would be generated automatically.

timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.vectorwise.Query object
Examples

Send a SQL query to a Vectorwise database and fetch a row as output.

id: vectorwise_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.vectorwise.Query
    url: jdbc:vectorwise://url:port/base
    username: admin
    password: admin_password
    sql: select * from vectorwise_types
    fetchType: FETCH_ONE

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.vectorwise.Query" required
Constant: "io.kestra.plugin.jdbc.vectorwise.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode, except when you are using the store property, in which case auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.vectorwise.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.vectorwise.Trigger
    interval: "PT5M"
    url: jdbc:vectorwise://url:port/base
    username: admin
    password: admin_password
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.vectorwise.Trigger" required
Constant: "io.kestra.plugin.jdbc.vectorwise.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between two consecutive polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval must be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.vertica.Batch object
Examples

Fetch rows from a table and bulk insert to another one.

id: vertica_batch_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.vertica.Query
    url: jdbc:vertica://dev:56982/db
    username: vertica_user
    password: vertica_password
    sql: |
      SELECT *
      FROM xref
      LIMIT 1500;
    fetchType: STORE

  - id: update
    type: io.kestra.plugin.jdbc.vertica.Batch
    from: "{{ outputs.query.uri }}"
    url: jdbc:vertica://prod:56982/db
    username: vertica_user
    password: vertica_password
    sql: insert into xref values( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )

Fetch rows from a table and bulk insert to another one, without using sql query.

id: vertica_batch_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.vertica.Query
    url: jdbc:vertica://dev:56982/db
    username: vertica_user
    password: vertica_passwd
    sql: |
      SELECT *
      FROM xref
      LIMIT 1500;
    fetchType: STORE

  - id: update
    type: io.kestra.plugin.jdbc.vertica.Batch
    from: "{{ outputs.query.uri }}"
    url: jdbc:vertica://prod:56982/db
    username: vertica_user
    password: vertica_passwd
    table: xref
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
sql string required

The query must have as many question marks as the number of columns in the table. Example: 'insert into <table_name> values( ? , ? , ? )' for 3 columns. In case you do not want all columns, you need to specify it in the query in the columns property Example: 'insert into <table_name> (id, name) values( ? , ? )' for inserting data into 2 columns: 'id' and 'name'.

type const: "io.kestra.plugin.jdbc.vertica.Batch" required
Constant: "io.kestra.plugin.jdbc.vertica.Batch"
url string required
allowFailure boolean

Default value is : false

Default: false
chunk integer

Default value is : 1000

Default: 1000
columns string[]

If not provided, the number of question marks (?) in the sql query must match the number of columns in the from data.

description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
table string

This property specifies the table name which will be used to retrieve the columns for the inserted values. You can use it instead of specifying manually the columns in the columns property. In this case, the sql property can also be omitted, an INSERT statement would be generated automatically.

timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.vertica.Query object
Examples

Send a SQL query to a Vertica database, and fetch a row as output.

id: vertica_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.jdbc.vertica.Query
    url: jdbc:vertica://127.0.0.1:56982/db
    username: vertica_user
    password: vertica_password
    sql: select * from customer
    fetchType: FETCH_ONE

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.vertica.Query" required
Constant: "io.kestra.plugin.jdbc.vertica.Query"
url string required
allowFailure boolean

Default value is : false

Default: false
autoCommit boolean

Sets this connection's auto-commit mode to the given state. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode, except when you are using the store property, in which case auto-commit will be disabled.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
store boolean

Default value is : false

Default: false
timeZoneId string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jdbc.vertica.Trigger object
Examples

Wait for a SQL query to return results, and then iterate through rows.

id: jdbc_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.jdbc.vertica.Trigger
    interval: "PT5M"
    url: jdbc:vertica://127.0.0.1:56982/db
    username: vertica_user
    password: vertica_password
    sql: "SELECT * FROM my_table"
    fetchType: FETCH

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.jdbc.vertica.Trigger" required
Constant: "io.kestra.plugin.jdbc.vertica.Trigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
fetch boolean

Default value is : false

Default: false
fetchOne boolean

Default value is : false

Default: false
fetchSize integer

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for this ResultSet object. If the fetch size specified is zero, the JDBC driver ignores the value and is free to make its own best guess as to what the fetch size should be. Ignored if autoCommit is false.

Default value is : 10000

Default: 10000
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between two consecutive polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval must be at least PT30S. See ISO_8601 Durations for more information about available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
sql string
stopAfter string[]
store boolean

Default value is : false

Default: false
timeZoneId string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jira.issues.Create object
Examples

Create a jira ticket on a failed flow execution using basic authentication.

id: jira_flow
namespace: company.myteam

tasks:
  - id: create_issue
    type: io.kestra.plugin.jira.issues.Create
    baseUrl: your-domain.atlassian.net
    username: [email protected]
    password: "{{ secret('your_jira_api_token') }}"
    projectKey: myproject
    summary: "Workflow failed"
    description: "{{ execution.id }} has failed on {{ taskrun.startDate }} See the link below for more details"
    labels:
      - bug
      - workflow

Create a jira ticket on a failed flow execution using OAUTH2 access token authentication.

id: jira_flow
namespace: company.myteam

tasks:
  - id: create_issue
    type: io.kestra.plugin.jira.issues.Create
    baseUrl: your-domain.atlassian.net
    accessToken: "{{ secret('your_jira_access_token') }}"
    projectKey: myproject
    summary: "Workflow failed"
    description: "{{ execution.id }} has failed on {{ taskrun.startDate }} See the link below for more details"
    labels:
      - bug
      - workflow

baseUrl string required
minLength=1
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
projectKey string required
minLength=1
type const: "io.kestra.plugin.jira.issues.Create" required
Constant: "io.kestra.plugin.jira.issues.Create"
accessToken string

(Required for OAuth authorization)

allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
labels string[]
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string

(Required for basic & API token authorization)

payload string
summary string
timeout string
format=duration
username string

(Required for basic & API token authorization)

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jira.issues.CreateComment object
Examples

Comment on a jira ticket on a failed flow execution.

id: jira_flow
namespace: company.myteam

tasks:
  - id: create_comment_on_a_ticket
    type: io.kestra.plugin.jira.issues.CreateComment
    baseUrl: your-domain.atlassian.net
    username: [email protected]
    password: "{{ secret('jira_api_token') }}"
    projectKey: project_key
    issueIdOrKey: "TID-53"
    body: "This ticket is not moving, do we need to outsource this!"

baseUrl string required
minLength=1
body string required
minLength=1
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
issueIdOrKey string required
minLength=1
projectKey string required
minLength=1
type const: "io.kestra.plugin.jira.issues.CreateComment" required
Constant: "io.kestra.plugin.jira.issues.CreateComment"
accessToken string

(Required for OAuth authorization)

allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
labels string[]
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string

(Required for basic & API token authorization)

payload string
summary string
timeout string
format=duration
username string

(Required for basic & API token authorization)

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.jira.issues.UpdateFields object

Update specific fields in a Jira ticket.

Examples

Update a Jira ticket fields

  id: jira_update_field
  namespace: company.myteam

  tasks:
    - id: update_ticket_field
      type: io.kestra.plugin.jira.issues.UpdateFields
      baseUrl: your-domain.atlassian.net
      username: [email protected]
      password: "{{ secret('your_jira_api_token') }}"
      issueIdOrKey: YOUR_ISSUE_KEY
      fields:
        description: "Updated description of: {{ execution.id }}"
        customfield_10005: "Updated value"

baseUrl string required
minLength=1
fields object required
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
issueIdOrKey string required
minLength=1
projectKey string required
minLength=1
type const: "io.kestra.plugin.jira.issues.UpdateFields" required
Constant: "io.kestra.plugin.jira.issues.UpdateFields"
accessToken string

(Required for OAuth authorization)

allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
labels string[]
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string

(Required for basic & API token authorization)

payload string
summary string
timeout string
format=duration
username string

(Required for basic & API token authorization)

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.kafka.Consume object
Examples
id: kafka_consume
namespace: company.team

tasks:
  - id: consume
    type: io.kestra.plugin.kafka.Consume
    topic: test_kestra
    properties:
      bootstrap.servers: localhost:9092
    serdeProperties:
      schema.registry.url: http://localhost:8085
    keyDeserializer: STRING
    valueDeserializer: AVRO

Connect to a Kafka cluster with SSL.

id: kafka_consume
namespace: company.team

tasks:
  - id: consume
    type: io.kestra.plugin.kafka.Consume
    properties:
      security.protocol: SSL
      bootstrap.servers: localhost:19092
      ssl.key.password: my-ssl-password
      ssl.keystore.type: PKCS12
      ssl.keystore.location: my-base64-encoded-keystore
      ssl.keystore.password: my-ssl-password
      ssl.truststore.location: my-base64-encoded-truststore
      ssl.truststore.password: my-ssl-password
    topic:
      - kestra_workerinstance
    keyDeserializer: STRING
    valueDeserializer: STRING

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
properties object required

The bootstrap.servers property is a minimal required configuration to connect to a Kafka topic. This property can reference any valid Consumer Configs or Producer Configs as key-value pairs.

If you want to pass a truststore or a keystore, you must provide a base64 encoded string for ssl.keystore.location and ssl.truststore.location.

type const: "io.kestra.plugin.kafka.Consume" required
Constant: "io.kestra.plugin.kafka.Consume"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
groupId string

Using a consumer group, we will fetch only records that haven't been consumed yet.

keyDeserializer string

Possible values are: STRING, INTEGER, FLOAT, DOUBLE, LONG, SHORT, BYTE_ARRAY, BYTE_BUFFER, BYTES, UUID, VOID, AVRO, JSON.

Default value is : STRING

Default: "STRING"
Values: "STRING" "INTEGER" "FLOAT" "DOUBLE" "LONG" "SHORT" "BYTE_ARRAY" "BYTE_BUFFER" "BYTES" "UUID" "VOID" "AVRO" "JSON"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's a soft limit evaluated every second.

format=duration
maxRecords integer

It's a soft limit evaluated every second.

partitions integer[]

Manually assign a list of partitions to the consumer.

pollDuration string

If no records are available, the maximum wait duration to wait for new records.

Default value is : 5.000000000

Default: 5.0
format=duration
serdeProperties object

Configuration that will be passed to serializer or deserializer. The avro.use.logical.type.converters is always passed when you have any values set to true.

Default value is : {}

Default:
{}
since string

By default, we consume all messages from the topics with no consumer group or depending on the configuration of the auto.offset.reset property. However, you can provide an arbitrary start time. This property is ignored if a consumer group is used. It must be a valid ISO 8601 date.

timeout string
format=duration
topic

It can be a string or a list of strings to consume from one or multiple topics.

topicPattern string

Consumer will subscribe to all topics matching the specified pattern to get dynamically assigned partitions.

valueDeserializer string

Possible values are: STRING, INTEGER, FLOAT, DOUBLE, LONG, SHORT, BYTE_ARRAY, BYTE_BUFFER, BYTES, UUID, VOID, AVRO, JSON.

Default value is : STRING

Default: "STRING"
Values: "STRING" "INTEGER" "FLOAT" "DOUBLE" "LONG" "SHORT" "BYTE_ARRAY" "BYTE_BUFFER" "BYTES" "UUID" "VOID" "AVRO" "JSON"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.kafka.Produce object
Examples

Read a CSV file, transform it and send it to Kafka.

id: send_message_to_kafka
namespace: company.team

inputs:
  - id: file
    type: FILE
    description: A CSV file with columns: id, username, tweet, and timestamp.

tasks:
  - id: csv_to_ion
    type: io.kestra.plugin.serdes.csv.CsvToIon
    from: "{{ inputs.file }}"

  - id: ion_to_avro_schema
    type: io.kestra.plugin.scripts.nashorn.FileTransform
    from: "{{ outputs.csv_to_ion.uri }}"
    script: |
      var result = {
        "key": row.id,
        "value": {
          "username": row.username,
          "tweet": row.tweet
        },
        "timestamp": row.timestamp,
        "headers": {
          "key": "value"
        }
      };
      row = result

  - id: avro_to_kafka
    type: io.kestra.plugin.kafka.Produce
    from: "{{ outputs.ion_to_avro_schema.uri }}"
    keySerializer: STRING
    properties:
      bootstrap.servers: localhost:9092
    serdeProperties:
      schema.registry.url: http://localhost:8085
    topic: test_kestra
    valueAvroSchema: |
      {"type":"record","name":"twitter_schema","namespace":"io.kestra.examples","fields":[{"name":"username","type":"string"},{"name":"tweet","type":"string"}]}
    valueSerializer: AVRO

from string | array | object required

Can be a Kestra internal storage URI, a map (i.e. a list of key-value pairs) or a list of maps. The following keys are supported: key, value, partition, timestamp, and headers.

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
properties object required

The bootstrap.servers property is a minimal required configuration to connect to a Kafka topic. This property can reference any valid Consumer Configs or Producer Configs as key-value pairs.

If you want to pass a truststore or a keystore, you must provide a base64 encoded string for ssl.keystore.location and ssl.truststore.location.

type const: "io.kestra.plugin.kafka.Produce" required
Constant: "io.kestra.plugin.kafka.Produce"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
keyAvroSchema string
keySerializer string

Possible values are: STRING, INTEGER, FLOAT, DOUBLE, LONG, SHORT, BYTE_ARRAY, BYTE_BUFFER, BYTES, UUID, VOID, AVRO, JSON.

Default value is : STRING

Default: "STRING"
Values: "STRING" "INTEGER" "FLOAT" "DOUBLE" "LONG" "SHORT" "BYTE_ARRAY" "BYTE_BUFFER" "BYTES" "UUID" "VOID" "AVRO" "JSON"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
serdeProperties object

Configuration that will be passed to serializer or deserializer. The avro.use.logical.type.converters is always passed when you have any values set to true.

Default value is : {}

Default:
{}
timeout string
format=duration
topic string

Could also be passed inside the from property using the key topic.

transactional boolean

Default value is : true

Default: true
valueAvroSchema string
valueSerializer string

Possible values are: STRING, INTEGER, FLOAT, DOUBLE, LONG, SHORT, BYTE_ARRAY, BYTE_BUFFER, BYTES, UUID, VOID, AVRO, JSON.

Default value is : STRING

Default: "STRING"
Values: "STRING" "INTEGER" "FLOAT" "DOUBLE" "LONG" "SHORT" "BYTE_ARRAY" "BYTE_BUFFER" "BYTES" "UUID" "VOID" "AVRO" "JSON"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.kafka.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.kafka.Trigger instead.

Examples

Consume a message from a Kafka topic in real time.

id: kafka_realtime_trigger
namespace: company.team

tasks:
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.value }}"

triggers:
  - id: realtime_trigger
    type: io.kestra.plugin.kafka.RealtimeTrigger
    topic: test_kestra
    properties:
      bootstrap.servers: localhost:9092
    serdeProperties:
      schema.registry.url: http://localhost:8085
      keyDeserializer: STRING
      valueDeserializer: AVRO
    groupId: kafkaConsumerGroupId
groupId string required

Using a consumer group, we will fetch only records that haven't been consumed yet.

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
properties object required

The bootstrap.servers property is a minimal required configuration to connect to a Kafka topic. This property can reference any valid Consumer Configs or Producer Configs as key-value pairs.

If you want to pass a truststore or a keystore, you must provide a base64 encoded string for ssl.keystore.location and ssl.truststore.location.

type const: "io.kestra.plugin.kafka.RealtimeTrigger" required
Constant: "io.kestra.plugin.kafka.RealtimeTrigger"
conditions array
description string
disabled boolean

Default value is : false

Default: false
keyDeserializer string

Possible values are: STRING, INTEGER, FLOAT, DOUBLE, LONG, SHORT, BYTE_ARRAY, BYTE_BUFFER, BYTES, UUID, VOID, AVRO, JSON.

Default value is : STRING

Default: "STRING"
Values: "STRING" "INTEGER" "FLOAT" "DOUBLE" "LONG" "SHORT" "BYTE_ARRAY" "BYTE_BUFFER" "BYTES" "UUID" "VOID" "AVRO" "JSON"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
partitions integer[]

Manually assign a list of partitions to the consumer.

serdeProperties object

Configuration that will be passed to serializer or deserializer. The avro.use.logical.type.converters is always passed when you have any values set to true.

Default value is : {}

Default:
{}
since string

By default, we consume all messages from the topics with no consumer group or depending on the configuration of the auto.offset.reset property. However, you can provide an arbitrary start time. This property is ignored if a consumer group is used. It must be a valid ISO 8601 date.

stopAfter string[]
topic

It can be a string or a list of strings to consume from one or multiple topics.

topicPattern string

Consumer will subscribe to all topics matching the specified pattern to get dynamically assigned partitions.

valueDeserializer string

Possible values are: STRING, INTEGER, FLOAT, DOUBLE, LONG, SHORT, BYTE_ARRAY, BYTE_BUFFER, BYTES, UUID, VOID, AVRO, JSON.

Default value is : STRING

Default: "STRING"
Values: "STRING" "INTEGER" "FLOAT" "DOUBLE" "LONG" "SHORT" "BYTE_ARRAY" "BYTE_BUFFER" "BYTES" "UUID" "VOID" "AVRO" "JSON"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.kafka.Trigger object

Note that you don't need an extra task to consume the message from the event trigger. The trigger will automatically consume messages and you can retrieve their content in your flow using the {{ trigger.uri }} variable. If you would like to consume each message from a Kafka topic in real-time and create one execution per message, you can use the io.kestra.plugin.kafka.RealtimeTrigger instead.

Examples

id: kafka_trigger
namespace: company.team

tasks:
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.value }}"

triggers:
  - id: trigger
    type: io.kestra.plugin.kafka.Trigger
    topic: test_kestra
    properties:
      bootstrap.servers: localhost:9092
    serdeProperties:
      schema.registry.url: http://localhost:8085
      keyDeserializer: STRING
      valueDeserializer: AVRO
    interval: PT30S
    maxRecords: 5
    groupId: kafkaConsumerGroupId

groupId string required

Using a consumer group, we will fetch only records that haven't been consumed yet.

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
properties object required

The bootstrap.servers property is a minimal required configuration to connect to a Kafka topic. This property can reference any valid Consumer Configs or Producer Configs as key-value pairs.

If you want to pass a truststore or a keystore, you must provide a base64 encoded string for ssl.keystore.location and ssl.truststore.location.

type const: "io.kestra.plugin.kafka.Trigger" required
Constant: "io.kestra.plugin.kafka.Trigger"
conditions array
description string
disabled boolean

Default value is : false

Default: false
interval string

The interval between two consecutive polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval must be at least PT30S. See ISO_8601 Durations for more information about available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
keyDeserializer string

Possible values are: STRING, INTEGER, FLOAT, DOUBLE, LONG, SHORT, BYTE_ARRAY, BYTE_BUFFER, BYTES, UUID, VOID, AVRO, JSON.

Default value is : STRING

Default: "STRING"
Values: "STRING" "INTEGER" "FLOAT" "DOUBLE" "LONG" "SHORT" "BYTE_ARRAY" "BYTE_BUFFER" "BYTES" "UUID" "VOID" "AVRO" "JSON"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's a soft limit evaluated every second.

format=duration
maxRecords integer

It's a soft limit evaluated every second.

partitions integer[]

Manually assign a list of partitions to the consumer.

pollDuration string

If no records are available, the maximum wait duration to wait for new records.

Default value is : 5.000000000

Default: 5.0
format=duration
serdeProperties object

Configuration that will be passed to serializer or deserializer. The avro.use.logical.type.converters is always passed when you have any values set to true.

Default value is : {}

Default:
{}
since string

By default, we consume all messages from the topics with no consumer group or depending on the configuration of the auto.offset.reset property. However, you can provide an arbitrary start time. This property is ignored if a consumer group is used. It must be a valid ISO 8601 date.

stopAfter string[]
topic

It can be a string or a list of strings to consume from one or multiple topics.

topicPattern string

Consumer will subscribe to all topics matching the specified pattern to get dynamically assigned partitions.

valueDeserializer string

Possible values are: STRING, INTEGER, FLOAT, DOUBLE, LONG, SHORT, BYTE_ARRAY, BYTE_BUFFER, BYTES, UUID, VOID, AVRO, JSON.

Default value is : STRING

Default: "STRING"
Values: "STRING" "INTEGER" "FLOAT" "DOUBLE" "LONG" "SHORT" "BYTE_ARRAY" "BYTE_BUFFER" "BYTES" "UUID" "VOID" "AVRO" "JSON"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.kubernetes.PodCreate object
Examples

Launch a Pod

id: kubernetes_pod_create
namespace: company.team

tasks:
  - id: pod_create
    type: io.kestra.plugin.kubernetes.PodCreate
    namespace: default
    metadata:
      labels:
        my-label: my-value
    spec:
      containers:
      - name: unittest
        image: debian:stable-slim
        command:
          - 'bash'
          - '-c'
          - 'for i in {1..10}; do echo $i; sleep 0.1; done'
    restartPolicy: Never

Launch a Pod with input files and gather its output files.

id: kubernetes
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: kubernetes
    type: io.kestra.plugin.kubernetes.PodCreate
    spec:
      containers:
      - name: unittest
        image: centos
        command:
          - cp
          - "{{workingDir}}/data.txt"
          - "{{workingDir}}/out.txt"
      restartPolicy: Never
    waitUntilRunning: PT3M
    inputFiles:
      data.txt: "{{inputs.file}}"
    outputFiles:
      - out.txt
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
spec object required
type const: "io.kestra.plugin.kubernetes.PodCreate" required
Constant: "io.kestra.plugin.kubernetes.PodCreate"
allowFailure boolean

Default value is : false

Default: false
connection
All of: io.kestra.plugin.kubernetes.models.Connection object, The connection parameters to the Kubernetes cluster
delete boolean

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
fileSidecar
All of: io.kestra.plugin.kubernetes.models.SideCar object, The configuration of the file sidecar container that handles the download and upload of files.
inputFiles Record<string, string>

The files will be available inside the kestra/working-dir directory of the container. You can use the special variable {{workingDir}} in your command to refer to it.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
metadata object
namespace string

Default value is : default

Default: "default"
outputFiles string[]

Only files created inside the kestra/working-dir directory of the container can be retrieved. Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

resume boolean

Default value is : true

Default: true
timeout string
format=duration
waitForLogInterval string

Default value is : 2.000000000

Default: 2.0
format=duration
waitRunning string

Default value is : 3600.000000000

Default: 3600.0
format=duration
waitUntilRunning string

This timeout is the maximum time that the Kubernetes scheduler will take to

  • schedule the job
  • pull the pod image
  • and start the pod.

Default value is : 600.000000000

Default: 600.0
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.kubernetes.kubectl.Apply object
Examples

Apply a Kubernetes resource, using YAML.

id: create_or_replace_deployment
namespace: company.team

tasks:
  - id: apply
    type: io.kestra.plugin.kubernetes.kubectl.Apply
    namespace: default
    spec: |-
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: mypod

Apply a Kubernetes resource, using a namespace file.

id: create_or_replace_deployment
namespace: company.team

tasks:
  - id: apply
    type: io.kestra.plugin.kubernetes.kubectl.Apply
    namespaceFiles:
      enabled: true
    namespace: default
    spec: "{{ read('deployment.yaml') }}"

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
spec string required
type const: "io.kestra.plugin.kubernetes.kubectl.Apply" required
Constant: "io.kestra.plugin.kubernetes.kubectl.Apply"
allowFailure boolean

Default value is : false

Default: false
connection
All of: io.kestra.plugin.kubernetes.models.Connection object, The connection parameters to the Kubernetes cluster
description string
disabled boolean

Default value is : false

Default: false
fileSidecar
All of: io.kestra.plugin.kubernetes.models.SideCar object, The configuration of the file sidecar container that handles the download and upload of files.
inputFiles Record<string, string>

The files will be available inside the kestra/working-dir directory of the container. You can use the special variable {{workingDir}} in your command to refer to it.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespace string
outputFiles string[]

Only files created inside the kestra/working-dir directory of the container can be retrieved. Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

timeout string
format=duration
waitRunning string

Default value is : 3600.000000000

Default: 3600.0
format=duration
waitUntilRunning string

This timeout is the maximum time that the Kubernetes scheduler will take to

  • schedule the job
  • pull the pod image
  • and start the pod.

Default value is : 600.000000000

Default: 600.0
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.kubernetes.models.Connection object
apiVersion string

Default value is : v1

Default: "v1"
caCertData string
caCertFile string
clientCertData string
clientCertFile string
clientKeyAlgo string

default is RSA

Default value is : RSA

Default: "RSA"
clientKeyData string
clientKeyFile string
clientKeyPassphrase string
disableHostnameVerification boolean
keyStoreFile string
keyStorePassphrase string
masterUrl string

Default value is : https://kubernetes.default.svc

Default: "https://kubernetes.default.svc"
namespace string
oauthToken string
oauthTokenProvider
All of: io.kestra.plugin.kubernetes.models.OAuthTokenProvider object, Oauth token provider
password string
trustCerts boolean
trustStoreFile string
trustStorePassphrase string
username string
io.kestra.plugin.kubernetes.models.OAuthTokenProvider object
output string
task io.kestra.plugin.core.debug.Echo | io.kestra.plugin.core.debug.Return | io.kestra.plugin.core.execution.Count | io.kestra.plugin.core.execution.Fail | io.kestra.plugin.core.execution.Labels | io.kestra.plugin.core.execution.PurgeExecutions | io.kestra.plugin.core.execution.Resume | io.kestra.plugin.core.flow.AllowFailure | io.kestra.plugin.core.flow.Dag | io.kestra.plugin.core.flow.EachParallel | io.kestra.plugin.core.flow.EachSequential | io.kestra.plugin.core.flow.ForEach | io.kestra.plugin.core.flow.ForEachItem | io.kestra.plugin.core.flow.If | io.kestra.plugin.core.flow.Parallel | io.kestra.plugin.core.flow.Pause | io.kestra.plugin.core.flow.Sequential | io.kestra.plugin.core.flow.Subflow | io.kestra.plugin.core.flow.Switch | io.kestra.plugin.core.flow.Template | io.kestra.plugin.core.flow.WaitFor | io.kestra.plugin.core.flow.WorkingDirectory | io.kestra.plugin.core.http.Download | io.kestra.plugin.core.http.Request | io.kestra.plugin.core.kv.Delete | io.kestra.plugin.core.kv.Get | io.kestra.plugin.core.kv.GetKeys | io.kestra.plugin.core.kv.Set | io.kestra.plugin.core.log.Fetch | io.kestra.plugin.core.log.Log | io.kestra.plugin.core.log.PurgeLogs | io.kestra.plugin.core.namespace.DeleteFiles | io.kestra.plugin.core.namespace.DownloadFiles | io.kestra.plugin.core.namespace.UploadFiles | io.kestra.plugin.core.output.OutputValues | io.kestra.plugin.core.state.Delete | io.kestra.plugin.core.state.Get | io.kestra.plugin.core.state.Set | io.kestra.plugin.core.storage.Concat | io.kestra.plugin.core.storage.DeduplicateItems | io.kestra.plugin.core.storage.Delete | io.kestra.plugin.core.storage.FilterItems | io.kestra.plugin.core.storage.LocalFiles | io.kestra.plugin.core.storage.PurgeCurrentExecutionFiles | io.kestra.plugin.core.storage.Reverse | io.kestra.plugin.core.storage.Size | io.kestra.plugin.core.storage.Split | io.kestra.plugin.core.templating.TemplatedTask | io.kestra.plugin.core.trigger.Toggle | io.kestra.core.tasks.scripts.Bash | 
io.kestra.plugin.scripts.shell.Commands | io.kestra.plugin.scripts.shell.Script | io.kestra.plugin.solace.Consume | io.kestra.plugin.solace.Produce | io.kestra.plugin.debezium.db2.Capture | io.kestra.plugin.scripts.jython.Eval | io.kestra.plugin.scripts.jython.FileTransform | io.kestra.plugin.git.Clone | io.kestra.plugin.git.Push | io.kestra.plugin.git.PushFlows | io.kestra.plugin.git.PushNamespaceFiles | io.kestra.plugin.git.Sync | io.kestra.plugin.git.SyncFlows | io.kestra.plugin.git.SyncNamespaceFiles | io.kestra.plugin.jdbc.sqlite.Query | io.kestra.plugin.debezium.mysql.Capture | io.kestra.plugin.jdbc.as400.Query | io.kestra.plugin.surrealdb.Query | io.kestra.plugin.ansible.cli.AnsibleCLI | io.kestra.plugin.jdbc.arrowflight.Query | io.kestra.plugin.serdes.avro.AvroToIon | io.kestra.plugin.serdes.avro.IonToAvro | io.kestra.plugin.serdes.csv.CsvToIon | io.kestra.plugin.serdes.csv.IonToCsv | io.kestra.plugin.serdes.excel.ExcelToIon | io.kestra.plugin.serdes.excel.IonToExcel | io.kestra.plugin.serdes.json.IonToJson | io.kestra.plugin.serdes.json.JsonToIon | io.kestra.plugin.serdes.parquet.IonToParquet | io.kestra.plugin.serdes.parquet.ParquetToIon | io.kestra.plugin.serdes.xml.IonToXml | io.kestra.plugin.serdes.xml.XmlToIon | io.kestra.plugin.debezium.mongodb.Capture | io.kestra.plugin.jdbc.vertica.Batch | io.kestra.plugin.jdbc.vertica.Query | io.kestra.plugin.jdbc.pinot.Query | io.kestra.plugin.neo4j.Batch | io.kestra.plugin.neo4j.Query | io.kestra.plugin.nats.Consume | io.kestra.plugin.nats.Produce | io.kestra.plugin.nats.kv.CreateBucket | io.kestra.plugin.nats.kv.Delete | io.kestra.plugin.nats.kv.Get | io.kestra.plugin.nats.kv.Put | io.kestra.plugin.pulsar.Consume | io.kestra.plugin.pulsar.Produce | io.kestra.plugin.pulsar.Reader | io.kestra.plugin.scripts.ruby.Commands | io.kestra.plugin.scripts.ruby.Script | io.kestra.plugin.notifications.discord.DiscordExecution | io.kestra.plugin.notifications.discord.DiscordIncomingWebhook | 
io.kestra.plugin.notifications.google.GoogleChatExecution | io.kestra.plugin.notifications.google.GoogleChatIncomingWebhook | io.kestra.plugin.notifications.mail.MailExecution | io.kestra.plugin.notifications.mail.MailSend | io.kestra.plugin.notifications.opsgenie.OpsgenieAlert | io.kestra.plugin.notifications.opsgenie.OpsgenieExecution | io.kestra.plugin.notifications.pagerduty.PagerDutyAlert | io.kestra.plugin.notifications.pagerduty.PagerDutyExecution | io.kestra.plugin.notifications.sendgrid.SendGridMailExecution | io.kestra.plugin.notifications.sendgrid.SendGridMailSend | io.kestra.plugin.notifications.sentry.SentryAlert | io.kestra.plugin.notifications.sentry.SentryExecution | io.kestra.plugin.notifications.slack.SlackExecution | io.kestra.plugin.notifications.slack.SlackIncomingWebhook | io.kestra.plugin.notifications.teams.TeamsExecution | io.kestra.plugin.notifications.teams.TeamsIncomingWebhook | io.kestra.plugin.notifications.telegram.TelegramExecution | io.kestra.plugin.notifications.telegram.TelegramSend | io.kestra.plugin.notifications.twilio.TwilioAlert | io.kestra.plugin.notifications.twilio.TwilioExecution | io.kestra.plugin.notifications.whatsapp.WhatsAppExecution | io.kestra.plugin.notifications.whatsapp.WhatsAppIncomingWebhook | io.kestra.plugin.notifications.zenduty.ZendutyAlert | io.kestra.plugin.notifications.zenduty.ZendutyExecution | io.kestra.plugin.notifications.zulip.ZulipExecution | io.kestra.plugin.notifications.zulip.ZulipIncomingWebhook | io.kestra.plugin.transform.grok.TransformItems | io.kestra.plugin.transform.grok.TransformValue | io.kestra.plugin.debezium.oracle.Capture | io.kestra.plugin.tika.Parse | io.kestra.plugin.jdbc.db2.Query | io.kestra.plugin.debezium.sqlserver.Capture | io.kestra.plugin.transform.jsonata.TransformItems | io.kestra.plugin.transform.jsonata.TransformValue | io.kestra.plugin.meilisearch.DocumentAdd | io.kestra.plugin.meilisearch.DocumentGet | io.kestra.plugin.meilisearch.FacetSearch | 
io.kestra.plugin.meilisearch.Search | io.kestra.plugin.jira.issues.Create | io.kestra.plugin.jira.issues.CreateComment | io.kestra.plugin.jira.issues.UpdateFields | io.kestra.plugin.singer.taps.BigQuery | io.kestra.plugin.singer.taps.BingAds | io.kestra.plugin.singer.taps.ChargeBee | io.kestra.plugin.singer.taps.ExchangeRateHost | io.kestra.plugin.singer.taps.FacebookAds | io.kestra.plugin.singer.taps.Fastly | io.kestra.plugin.singer.taps.GenericTap | io.kestra.plugin.singer.taps.GitHub | io.kestra.plugin.singer.taps.Gitlab | io.kestra.plugin.singer.taps.GoogleAdwords | io.kestra.plugin.singer.taps.GoogleAnalytics | io.kestra.plugin.singer.taps.GoogleSearchConsole | io.kestra.plugin.singer.taps.HubSpot | io.kestra.plugin.singer.taps.Marketo | io.kestra.plugin.singer.taps.Netsuite | io.kestra.plugin.singer.taps.PipelinewiseMongoDb | io.kestra.plugin.singer.taps.PipelinewiseMysql | io.kestra.plugin.singer.taps.PipelinewiseOracle | io.kestra.plugin.singer.taps.PipelinewisePostgres | io.kestra.plugin.singer.taps.PipelinewiseSqlServer | io.kestra.plugin.singer.taps.Quickbooks | io.kestra.plugin.singer.taps.Recharge | io.kestra.plugin.singer.taps.SageIntacct | io.kestra.plugin.singer.taps.Salesforce | io.kestra.plugin.singer.taps.Shopify | io.kestra.plugin.singer.taps.Slack | io.kestra.plugin.singer.taps.Stripe | io.kestra.plugin.singer.taps.Zendesk | io.kestra.plugin.singer.taps.Zoom | io.kestra.plugin.singer.targets.AdswerveBigQuery | io.kestra.plugin.singer.targets.Csv | io.kestra.plugin.singer.targets.DatamillCoPostgres | io.kestra.plugin.singer.targets.GenericTarget | io.kestra.plugin.singer.targets.Json | io.kestra.plugin.singer.targets.MeltanoSnowflake | io.kestra.plugin.singer.targets.Oracle | io.kestra.plugin.singer.targets.PipelinewisePostgres | io.kestra.plugin.singer.targets.PipelinewiseRedshift | io.kestra.plugin.singer.targets.PipelinewiseSnowflake | io.kestra.plugin.singer.targets.SqlServer | io.kestra.plugin.gcp.auth.OauthAccessToken | 
io.kestra.plugin.gcp.bigquery.Copy | io.kestra.plugin.gcp.bigquery.CopyPartitions | io.kestra.plugin.gcp.bigquery.CreateDataset | io.kestra.plugin.gcp.bigquery.CreateTable | io.kestra.plugin.gcp.bigquery.DeleteDataset | io.kestra.plugin.gcp.bigquery.DeletePartitions | io.kestra.plugin.gcp.bigquery.DeleteTable | io.kestra.plugin.gcp.bigquery.ExtractToGcs | io.kestra.plugin.gcp.bigquery.Load | io.kestra.plugin.gcp.bigquery.LoadFromGcs | io.kestra.plugin.gcp.bigquery.Query | io.kestra.plugin.gcp.bigquery.StorageWrite | io.kestra.plugin.gcp.bigquery.TableMetadata | io.kestra.plugin.gcp.bigquery.UpdateDataset | io.kestra.plugin.gcp.bigquery.UpdateTable | io.kestra.plugin.gcp.cli.GCloudCLI | io.kestra.plugin.gcp.dataproc.batches.PySparkSubmit | io.kestra.plugin.gcp.dataproc.batches.RSparkSubmit | io.kestra.plugin.gcp.dataproc.batches.SparkSqlSubmit | io.kestra.plugin.gcp.dataproc.batches.SparkSubmit | io.kestra.plugin.gcp.dataproc.clusters.Create | io.kestra.plugin.gcp.dataproc.clusters.Delete | io.kestra.plugin.gcp.firestore.Delete | io.kestra.plugin.gcp.firestore.Get | io.kestra.plugin.gcp.firestore.Query | io.kestra.plugin.gcp.firestore.Set | io.kestra.plugin.gcp.gcs.Compose | io.kestra.plugin.gcp.gcs.Copy | io.kestra.plugin.gcp.gcs.CreateBucket | io.kestra.plugin.gcp.gcs.CreateBucketIamPolicy | io.kestra.plugin.gcp.gcs.Delete | io.kestra.plugin.gcp.gcs.DeleteBucket | io.kestra.plugin.gcp.gcs.DeleteList | io.kestra.plugin.gcp.gcs.Download | io.kestra.plugin.gcp.gcs.Downloads | io.kestra.plugin.gcp.gcs.List | io.kestra.plugin.gcp.gcs.UpdateBucket | io.kestra.plugin.gcp.gcs.Upload | io.kestra.plugin.gcp.gke.ClusterMetadata | io.kestra.plugin.gcp.pubsub.Consume | io.kestra.plugin.gcp.pubsub.Publish | io.kestra.plugin.gcp.vertexai.ChatCompletion | io.kestra.plugin.gcp.vertexai.CustomJob | io.kestra.plugin.gcp.vertexai.MultimodalCompletion | io.kestra.plugin.gcp.vertexai.TextCompletion | io.kestra.plugin.hubspot.tickets.Create | io.kestra.plugin.terraform.cli.TerraformCLI 
| io.kestra.plugin.jdbc.duckdb.Query | io.kestra.plugin.powerbi.RefreshGroupDataset | io.kestra.plugin.minio.Copy | io.kestra.plugin.minio.CreateBucket | io.kestra.plugin.minio.Delete | io.kestra.plugin.minio.DeleteList | io.kestra.plugin.minio.Download | io.kestra.plugin.minio.Downloads | io.kestra.plugin.minio.List | io.kestra.plugin.minio.Upload | io.kestra.plugin.spark.JarSubmit | io.kestra.plugin.spark.PythonSubmit | io.kestra.plugin.spark.RSubmit | io.kestra.plugin.spark.SparkCLI | io.kestra.plugin.scripts.powershell.Commands | io.kestra.plugin.scripts.powershell.Script | io.kestra.plugin.jdbc.oracle.Batch | io.kestra.plugin.jdbc.oracle.Query | io.kestra.plugin.elasticsearch.Bulk | io.kestra.plugin.elasticsearch.Get | io.kestra.plugin.elasticsearch.Load | io.kestra.plugin.elasticsearch.Put | io.kestra.plugin.elasticsearch.Request | io.kestra.plugin.elasticsearch.Scroll | io.kestra.plugin.elasticsearch.Search | io.kestra.plugin.cloudquery.CloudQueryCLI | io.kestra.plugin.cloudquery.Sync | io.kestra.plugin.openai.ChatCompletion | io.kestra.plugin.openai.CreateImage | io.kestra.plugin.amqp.Consume | io.kestra.plugin.amqp.CreateQueue | io.kestra.plugin.amqp.DeclareExchange | io.kestra.plugin.amqp.Publish | io.kestra.plugin.amqp.QueueBind | io.kestra.plugin.scripts.r.Commands | io.kestra.plugin.scripts.r.Script | io.kestra.plugin.hightouch.Sync | io.kestra.plugin.ldap.Add | io.kestra.plugin.ldap.Delete | io.kestra.plugin.ldap.IonToLdif | io.kestra.plugin.ldap.LdifToIon | io.kestra.plugin.ldap.Modify | io.kestra.plugin.ldap.Search | io.kestra.plugin.cassandra.astradb.Query | io.kestra.plugin.cassandra.standard.Query | io.kestra.plugin.malloy.CLI | io.kestra.plugin.fivetran.connectors.Sync | io.kestra.plugin.mongodb.Bulk | io.kestra.plugin.mongodb.Delete | io.kestra.plugin.mongodb.Find | io.kestra.plugin.mongodb.InsertOne | io.kestra.plugin.mongodb.Load | io.kestra.plugin.mongodb.Update | io.kestra.plugin.redis.list.ListPop | io.kestra.plugin.redis.list.ListPush | 
io.kestra.plugin.redis.pubsub.Publish | io.kestra.plugin.redis.string.Delete | io.kestra.plugin.redis.string.Get | io.kestra.plugin.redis.string.Set | io.kestra.plugin.zendesk.tickets.Create | io.kestra.plugin.databricks.cluster.CreateCluster | io.kestra.plugin.databricks.cluster.DeleteCluster | io.kestra.plugin.databricks.dbfs.Download | io.kestra.plugin.databricks.dbfs.Upload | io.kestra.plugin.databricks.job.CreateJob | io.kestra.plugin.databricks.job.SubmitRun | io.kestra.plugin.databricks.sql.Query | io.kestra.plugin.linear.issues.Create | io.kestra.plugin.airbyte.cloud.jobs.Reset | io.kestra.plugin.airbyte.cloud.jobs.Sync | io.kestra.plugin.airbyte.connections.CheckStatus | io.kestra.plugin.airbyte.connections.Sync | io.kestra.plugin.jdbc.trino.Query | io.kestra.plugin.jdbc.sybase.Query | io.kestra.plugin.airflow.dags.TriggerDagRun | io.kestra.plugin.scripts.jbang.Commands | io.kestra.plugin.scripts.jbang.Script | io.kestra.plugin.scripts.groovy.Eval | io.kestra.plugin.scripts.groovy.FileTransform | io.kestra.plugin.dataform.cli.DataformCLI | io.kestra.plugin.jdbc.sqlserver.Batch | io.kestra.plugin.jdbc.sqlserver.Query | io.kestra.plugin.jdbc.clickhouse.BulkInsert | io.kestra.plugin.jdbc.clickhouse.ClickHouseLocalCLI | io.kestra.plugin.jdbc.clickhouse.Query | io.kestra.plugin.jdbc.druid.Query | io.kestra.plugin.github.code.Search | io.kestra.plugin.github.commits.Search | io.kestra.plugin.github.issues.Comment | io.kestra.plugin.github.issues.Create | io.kestra.plugin.github.issues.Search | io.kestra.plugin.github.pulls.Create | io.kestra.plugin.github.pulls.Search | io.kestra.plugin.github.repositories.Search | io.kestra.plugin.github.topics.Search | io.kestra.plugin.github.users.Search | io.kestra.plugin.soda.Scan | io.kestra.plugin.docker.Build | io.kestra.plugin.docker.Run | io.kestra.plugin.servicenow.Post | io.kestra.plugin.fs.ftp.Delete | io.kestra.plugin.fs.ftp.Download | io.kestra.plugin.fs.ftp.Downloads | io.kestra.plugin.fs.ftp.List | 
io.kestra.plugin.fs.ftp.Move | io.kestra.plugin.fs.ftp.Upload | io.kestra.plugin.fs.ftp.Uploads | io.kestra.plugin.fs.ftps.Delete | io.kestra.plugin.fs.ftps.Download | io.kestra.plugin.fs.ftps.Downloads | io.kestra.plugin.fs.ftps.List | io.kestra.plugin.fs.ftps.Move | io.kestra.plugin.fs.ftps.Upload | io.kestra.plugin.fs.ftps.Uploads | io.kestra.plugin.fs.sftp.Delete | io.kestra.plugin.fs.sftp.Download | io.kestra.plugin.fs.sftp.Downloads | io.kestra.plugin.fs.sftp.List | io.kestra.plugin.fs.sftp.Move | io.kestra.plugin.fs.sftp.Upload | io.kestra.plugin.fs.sftp.Uploads | io.kestra.plugin.fs.smb.Delete | io.kestra.plugin.fs.smb.Download | io.kestra.plugin.fs.smb.Downloads | io.kestra.plugin.fs.smb.List | io.kestra.plugin.fs.smb.Move | io.kestra.plugin.fs.smb.Upload | io.kestra.plugin.fs.smb.Uploads | io.kestra.plugin.fs.ssh.Command | io.kestra.plugin.crypto.openpgp.Decrypt | io.kestra.plugin.crypto.openpgp.Encrypt | io.kestra.plugin.weaviate.BatchCreate | io.kestra.plugin.weaviate.Delete | io.kestra.plugin.weaviate.Query | io.kestra.plugin.weaviate.SchemaCreate | io.kestra.core.tasks.scripts.Node | io.kestra.plugin.scripts.node.Commands | io.kestra.plugin.scripts.node.Script | io.kestra.plugin.jdbc.mysql.Batch | io.kestra.plugin.jdbc.mysql.Query | io.kestra.plugin.modal.cli.ModalCLI | io.kestra.plugin.jdbc.vectorwise.Batch | io.kestra.plugin.jdbc.vectorwise.Query | io.kestra.plugin.jdbc.redshift.Query | io.kestra.plugin.kubernetes.PodCreate | io.kestra.plugin.kubernetes.kubectl.Apply | io.kestra.plugin.jdbc.postgresql.Batch | io.kestra.plugin.jdbc.postgresql.CopyIn | io.kestra.plugin.jdbc.postgresql.CopyOut | io.kestra.plugin.jdbc.postgresql.Query | io.kestra.plugin.mqtt.Publish | io.kestra.plugin.mqtt.Subscribe | io.kestra.plugin.sqlmesh.cli.SQLMeshCLI | io.kestra.plugin.couchbase.Query | io.kestra.plugin.scripts.julia.Commands | io.kestra.plugin.scripts.julia.Script | io.kestra.plugin.jdbc.dremio.Query | io.kestra.plugin.googleworkspace.drive.Create | 
io.kestra.plugin.googleworkspace.drive.Delete | io.kestra.plugin.googleworkspace.drive.Download | io.kestra.plugin.googleworkspace.drive.Export | io.kestra.plugin.googleworkspace.drive.List | io.kestra.plugin.googleworkspace.drive.Upload | io.kestra.plugin.googleworkspace.sheets.CreateSpreadsheet | io.kestra.plugin.googleworkspace.sheets.DeleteSpreadsheet | io.kestra.plugin.googleworkspace.sheets.Load | io.kestra.plugin.googleworkspace.sheets.Read | io.kestra.plugin.googleworkspace.sheets.ReadRange | io.kestra.plugin.dbt.cli.Build | io.kestra.plugin.dbt.cli.Compile | io.kestra.plugin.dbt.cli.DbtCLI | io.kestra.plugin.dbt.cli.Deps | io.kestra.plugin.dbt.cli.Freshness | io.kestra.plugin.dbt.cli.List | io.kestra.plugin.dbt.cli.Run | io.kestra.plugin.dbt.cli.Seed | io.kestra.plugin.dbt.cli.Setup | io.kestra.plugin.dbt.cli.Snapshot | io.kestra.plugin.dbt.cli.Test | io.kestra.plugin.dbt.cloud.CheckStatus | io.kestra.plugin.dbt.cloud.TriggerRun | io.kestra.plugin.scripts.nashorn.Eval | io.kestra.plugin.scripts.nashorn.FileTransform | io.kestra.plugin.aws.athena.Query | io.kestra.plugin.aws.cli.AwsCLI | io.kestra.plugin.aws.dynamodb.DeleteItem | io.kestra.plugin.aws.dynamodb.GetItem | io.kestra.plugin.aws.dynamodb.PutItem | io.kestra.plugin.aws.dynamodb.Query | io.kestra.plugin.aws.dynamodb.Scan | io.kestra.plugin.aws.ecr.GetAuthToken | io.kestra.plugin.aws.eventbridge.PutEvents | io.kestra.plugin.aws.kinesis.PutRecords | io.kestra.plugin.aws.lambda.Invoke | io.kestra.plugin.aws.s3.Copy | io.kestra.plugin.aws.s3.CreateBucket | io.kestra.plugin.aws.s3.Delete | io.kestra.plugin.aws.s3.DeleteList | io.kestra.plugin.aws.s3.Download | io.kestra.plugin.aws.s3.Downloads | io.kestra.plugin.aws.s3.List | io.kestra.plugin.aws.s3.Upload | io.kestra.plugin.aws.sns.Publish | io.kestra.plugin.aws.sqs.Consume | io.kestra.plugin.aws.sqs.Publish | io.kestra.plugin.compress.ArchiveCompress | io.kestra.plugin.compress.ArchiveDecompress | io.kestra.plugin.compress.FileCompress | 
io.kestra.plugin.compress.FileDecompress | io.kestra.core.tasks.scripts.Python | io.kestra.plugin.scripts.python.Commands | io.kestra.plugin.scripts.python.Script | io.kestra.plugin.debezium.postgres.Capture | io.kestra.plugin.azure.batch.job.Create | io.kestra.plugin.azure.batch.pool.Resize | io.kestra.plugin.azure.cli.AzCLI | io.kestra.plugin.azure.datafactory.CreateRun | io.kestra.plugin.azure.eventhubs.Consume | io.kestra.plugin.azure.eventhubs.Produce | io.kestra.plugin.azure.storage.blob.Copy | io.kestra.plugin.azure.storage.blob.Delete | io.kestra.plugin.azure.storage.blob.DeleteList | io.kestra.plugin.azure.storage.blob.Download | io.kestra.plugin.azure.storage.blob.Downloads | io.kestra.plugin.azure.storage.blob.List | io.kestra.plugin.azure.storage.blob.SharedAccess | io.kestra.plugin.azure.storage.blob.Upload | io.kestra.plugin.azure.storage.table.Bulk | io.kestra.plugin.azure.storage.table.Delete | io.kestra.plugin.azure.storage.table.Get | io.kestra.plugin.azure.storage.table.List | io.kestra.plugin.kafka.Consume | io.kestra.plugin.kafka.Produce | io.kestra.plugin.jdbc.snowflake.Download | io.kestra.plugin.jdbc.snowflake.Query | io.kestra.plugin.jdbc.snowflake.Upload
io.kestra.plugin.kubernetes.models.SideCar object
image string

Default value is : busybox

Default: "busybox"
io.kestra.plugin.ldap.Add object

Creates a new entry, if allowed, for each line of provided LDIF files.

##### Examples

Insert entries in LDAP server.

id: ldap_add
namespace: company.team

tasks:
  - id: add
    type: io.kestra.plugin.ldap.Add
    description: What your task is supposed to do and why.
    userDn: cn=admin,dc=orga,dc=en
    password: admin
    inputs:
       - "{{outputs.someTask.uri_of_ldif_formated_file}}"
    hostname: 0.0.0.0
    port: 18060

hostname string required

Hostname for connection.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
inputs string[] required

List of URI(s) of file(s) containing LDIF formatted entries to input into LDAP.

password string required

User password for connection.

port string required

A whole number describing the port for connection.

type const: "io.kestra.plugin.ldap.Add" required
Constant: "io.kestra.plugin.ldap.Add"
userDn string required

User DN for connection.

allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.ldap.Delete object

Remove entries based on a targeted DN list.

##### Examples

id: ldap_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.ldap.Delete
    description: What your task is supposed to do and why.
    userDn: cn=admin,dc=orga,dc=fr
    password: admin
    inputs:
       - "{{ outputs.some_task.uri_of_ldif_formated_file }}"
    hostname: 0.0.0.0
    port: 15060

hostname string required

Hostname for connection.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
inputs string[] required

Targeted DN(s) in the LDAP.

password string required

User password for connection.

port string required

A whole number describing the port for connection.

type const: "io.kestra.plugin.ldap.Delete" required
Constant: "io.kestra.plugin.ldap.Delete"
userDn string required

User DN for connection.

allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.ldap.IonToLdif object

Transform .ion files to .ldif ones.

##### Examples

YAML: Make LDIF entries from ION ones.

id: ldap_ion_to_ldif
namespace: company.team

inputs:
  - id: file1
    type: FILE
  - id: file2
    type: FILE

tasks:
  - id: ion_to_ldiff
    type: io.kestra.plugin.ldap.IonToLdif
    inputs:
      - "{{ inputs.file1 }}"
      - "{{ inputs.file2 }}"

INPUT example: here's an ION file content that may be inputted :

# simple entry
{dn:"[email protected],ou=diffusion_list,dc=orga,dc=com",attributes:{description:["Some description","Some other description"],someOtherAttribute:["perhaps","perhapsAgain"]}}
# modify changeRecord
{dn:"[email protected],ou=diffusion_list,dc=orga,dc=com",changeType:"modify",modifications:[{operation:"DELETE",attribute:"description",values:["Some description 3"]},{operation:"ADD",attribute:"description",values:["Some description 4"]},{operation:"REPLACE",attribute:"someOtherAttribute",values:["Loves herself more"]}]}
# delete changeRecord
{dn:"[email protected],ou=diffusion_list,dc=orga,dc=com",changeType:"delete"}
# moddn changeRecord (it is mandatory to specify a newrdn and a deleteoldrdn)
{dn:"[email protected],ou=diffusion_list,dc=orga,dc=com",changeType:"moddn",newDn:{newrdn:"[email protected]",deleteoldrdn:false,newsuperior:"ou=expeople,dc=example,dc=com"}}
# moddn changeRecord without new superior (it is optional to specify a new superior field)
{dn:"[email protected],ou=diffusion_list,dc=orga,dc=com",changeType:"moddn",newDn:{newrdn:"[email protected]",deleteoldrdn:true}}

OUTPUT example: here's an LDIF file content that may be outputted :

# simple entry
dn: [email protected],ou=diffusion_list,dc=orga,dc=com
description: Some description
someOtherAttribute: perhaps
description: Some other description
someOtherAttribute: perhapsAgain

# modify changeRecord
dn: [email protected],ou=diffusion_list,dc=orga,dc=com
changetype: modify
delete: description
description: Some description 3
-
add: description
description: Some description 4
-
replace: someOtherAttribute
someOtherAttribute: Loves herself more
-

# delete changeRecord
dn: [email protected],ou=diffusion_list,dc=orga,dc=com
changetype: delete

# moddn with new superior
dn: [email protected],ou=diffusion_list,dc=orga,dc=com
changetype: moddn
newrdn: [email protected]
deleteoldrdn: 0
newsuperior: ou=expeople,dc=example,dc=com

# moddn without new superior
dn: [email protected],ou=diffusion_list,dc=orga,dc=com
changetype: moddn
newrdn: [email protected]
deleteoldrdn: 1

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
inputs string[] required
type const: "io.kestra.plugin.ldap.IonToLdif" required
Constant: "io.kestra.plugin.ldap.IonToLdif"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.ldap.LdifToIon object

Transform .ldif files to .ion ones.

##### Examples

Make ION entries from LDIF ones.

id: ldap_ldif_to_ion
namespace: company.team

inputs:
  - id: file1
    type: FILE
  - id: file2
    type: FILE

tasks:
  - id: ldif_to_ion
    type: io.kestra.plugin.ldap.LdifToIon
    inputs:
      - "{{ inputs.file1 }}"
      - "{{ inputs.file2 }}"


INPUT example : here's an LDIF file content that may be inputted :

# simple entry
dn: [email protected],ou=diffusion_list,dc=orga,dc=com
description: Some description
someOtherAttribute: perhaps
description: Some other description
someOtherAttribute: perhapsAgain

# modify changeRecord
dn: [email protected],ou=diffusion_list,dc=orga,dc=com
changetype: modify
delete: description
description: Some description 3
-
add: description
description: Some description 4
-
replace: someOtherAttribute
someOtherAttribute: Loves herself more
-

# delete changeRecord
dn: [email protected],ou=diffusion_list,dc=orga,dc=com
changetype: delete

# moddn and modrdn are equal; what's mandatory is to specify the fields in the following order: newrdn -> deleteoldrdn -> (optional) newsuperior
dn: [email protected],ou=diffusion_list,dc=orga,dc=com
changetype: modrdn
newrdn: [email protected]
deleteoldrdn: 0
newsuperior: ou=expeople,dc=example,dc=com

# moddn without new superior
dn: [email protected],ou=diffusion_list,dc=orga,dc=com
changetype: moddn
newrdn: [email protected]
deleteoldrdn: 1

OUTPUT example : here's an ION file content that may be outputted :

# simple entry
{dn:"[email protected],ou=diffusion_list,dc=orga,dc=com",attributes:{description:["Some description","Some other description"],someOtherAttribute:["perhaps","perhapsAgain"]}}
# modify changeRecord
{dn:"[email protected],ou=diffusion_list,dc=orga,dc=com",changeType:"modify",modifications:[{operation:"DELETE",attribute:"description",values:["Some description 3"]},{operation:"ADD",attribute:"description",values:["Some description 4"]},{operation:"REPLACE",attribute:"someOtherAttribute",values:["Loves herself more"]}]}
# delete changeRecord
{dn:"[email protected],ou=diffusion_list,dc=orga,dc=com",changeType:"delete"}
# moddn changeRecord (it is mandatory to specify a newrdn and a deleteoldrdn)
{dn:"[email protected],ou=diffusion_list,dc=orga,dc=com",changeType:"moddn",newDn:{newrdn:"[email protected]",deleteoldrdn:false,newsuperior:"ou=expeople,dc=example,dc=com"}}
# moddn changeRecord without new superior (it is optional to specify a new superior field)
{dn:"[email protected],ou=diffusion_list,dc=orga,dc=com",changeType:"moddn",newDn:{newrdn:"[email protected]",deleteoldrdn:true}}

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
inputs string[] required
type const: "io.kestra.plugin.ldap.LdifToIon" required
Constant: "io.kestra.plugin.ldap.LdifToIon"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.ldap.Modify object

Modify, Delete or Add attributes or DNs following the LDIF changeType field of each entry provided.

##### Examples

Modify entries in LDAP server.

id: ldap_modify
namespace: company.team

tasks:
  - id: modify
    type: io.kestra.plugin.ldap.Modify
    userDn: cn=admin,dc=orga,dc=en
    password: admin
    inputs:
       - "{{ outputs.some_task.uri_of_ldif_change_record_formated_file }}"
    hostname: 0.0.0.0
    port: 18060

hostname string required

Hostname for connection.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
inputs string[] required

List of URI(s) of file(s) containing LDIF formatted entries to modify into LDAP. Entries must provide a changeType field.

password string required

User password for connection.

port string required

A whole number describing the port for connection.

type const: "io.kestra.plugin.ldap.Modify" required
Constant: "io.kestra.plugin.ldap.Modify"
userDn string required

User DN for connection.

allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.ldap.Search object

Search and list entries based on a filter list for each base DN target.

##### Examples

Retrieve LDAP entries. In this example, assuming that there is exactly one entry matching each of our filters, the outputs of the task would be four entries in this order (since we search two times in the same baseDn): (dn, description, mail) of {melusine, metatron, melusine, metatron}.

id: ldap_search
namespace: company.team

tasks:
  - id: search
    type: io.kestra.plugin.ldap.Search
    userDn: cn=admin,dc=orga,dc=en
    password: admin
    baseDn: ou=people,dc=orga,dc=en
    filter: (|(sn=melusine*)(sn=metatron*))
    attributes:
      - description
      - mail
    hostname: 0.0.0.0
    port: 15060

hostname string required

Hostname for connection.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
password string required

User password for connection.

port string required

A whole number describing the port for connection.

type const: "io.kestra.plugin.ldap.Search" required
Constant: "io.kestra.plugin.ldap.Search"
userDn string required

User DN for connection.

allowFailure boolean

Default value is : false

Default: false
attributes string[]

Specific attributes to retrieve from the filtered entries. Retrieves all attributes by default. Special attributes may be specified: "+" -> OPERATIONAL_ATTRIBUTES "1.1" -> NO_ATTRIBUTES "0.0" -> ALL_ATTRIBUTES_EXCEPT_OPERATIONAL --> This special attribute cannot be combined with other attributes and the search will ignore everything else.

Default value is : - '*'

Default value is : - '*'

Default:
[
  "*"
]
baseDn string

Base DN target in the LDAP.

Default value is : ou=system

Default: "ou=system"
description string
disabled boolean

Default value is : false

Default: false
filter string

Filter for the search in the LDAP.

Default value is : (objectclass=*)

Default: "(objectclass=*)"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
sub
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.linear.issues.Create object
Examples
id: linear_issues_create
namespace: company.team

tasks:
  - id: create_issue
    type: io.kestra.plugin.linear.issues.Create
    token: your_api_token
    team: MyTeamName
    title: "Increased 5xx in Demo Service"
    description: "The number of 5xx has increased beyond the threshold for Demo service."
    labels:
      - Bug
      - Workflow

Create an issue when a Kestra workflow in any namespace with company as prefix fails.

id: create_ticket_on_failure
namespace: system

tasks:
  - id: create_issue
    type: io.kestra.plugin.linear.issues.Create
    token: your_api_token
    team: MyTeamName
    title: Workflow failed
    description: "{{ execution.id }} has failed on {{ taskrun.startDate }}. See the link below for more details."
    labels:
      - Bug
      - Workflow

triggers:
  - id: on_failure
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: company
        comparison: PREFIX

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.linear.issues.Create" required
Constant: "io.kestra.plugin.linear.issues.Create"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
labels string[]
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
team string
timeout string
format=duration
title string
token string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.malloy.CLI object
Examples

Create a Malloy script and run the malloy-cli run command.

id: malloy
namespace: company.team

tasks:
  - id: run_malloy
    type: io.kestra.plugin.malloy.CLI
    inputFiles:
      model.malloy: |
        source: my_model is duckdb.table('https://huggingface.co/datasets/kestra/datasets/raw/main/csv/iris.csv')

        run: my_model -> {
            group_by: variety
            aggregate:
                avg_petal_width is avg(petal_width)
                avg_petal_length is avg(petal_length)
                avg_sepal_width is avg(sepal_width)
                avg_sepal_length is avg(sepal_length)
        }
    commands:
      - malloy-cli run model.malloy

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.malloy.CLI" required
Constant: "io.kestra.plugin.malloy.CLI"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
commands string[]
containerImage string

Default value is : ghcr.io/kestra-io/malloy

Default: "ghcr.io/kestra-io/malloy"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false, all commands will be executed one after the other. The final state of the task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the `set -e` option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.meilisearch.DocumentAdd object

Add one or multiple documents to a Meilisearch DB.

##### Examples

Add Document to Meilisearch

    id: meilisearch-add-flow
    namespace: company.team

    variables:
      host: http://172.18.0.3:7700/

    tasks:
      - id: http_download
        type: io.kestra.plugin.core.http.Download
        uri: https://pokeapi.co/api/v2/pokemon/jigglypuff

      - id: to_ion
        type: io.kestra.plugin.serdes.json.JsonToIon
        from: "{{ outputs.http_download.uri }}"

      - id: add
        type: io.kestra.plugin.meilisearch.DocumentAdd
        index: "pokemon"
        url: "{{ vars.host }}"
        key: "MASTER_KEY"
        data: "{{ outputs.to_ion.uri }}"

data object required
3 nested properties
fromList object[]
fromMap object
fromURI string | string
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
index string required

Index of the collection you want to add documents to

key string required
type const: "io.kestra.plugin.meilisearch.DocumentAdd" required
Constant: "io.kestra.plugin.meilisearch.DocumentAdd"
url string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.meilisearch.DocumentGet object

Get a JSON document from Meilisearch using id and index.

##### Examples

Get Document from Meilisearch

    id: meilisearch-get-flow
    namespace: company.team

    variables:
      id: a123
      index: pokemons
      host: http://172.18.0.3:7700/

    tasks:
      - id: get_document
        type: io.kestra.plugin.meilisearch.DocumentGet
        index: {{ vars.index }}
        documentId: {{ vars.id }}
        url: "{{ vars.host }}"
        key: "MASTER_KEY"

documentId string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
index string required

Index of the collections you want to retrieve your document from

key string required
type const: "io.kestra.plugin.meilisearch.DocumentGet" required
Constant: "io.kestra.plugin.meilisearch.DocumentGet"
url string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.meilisearch.FacetSearch object

Perform a facet search from a Meilisearch DB. WARNING: make sure to set the filterable attributes beforehand.

Examples

Sample facet search

    facetQuery: "fiction",
    facetName: "genre",
    filters:
        -"rating > 3"
    url: "http://localhost:7700",
    key: "MASTER_KEY",
    index: "movies"

    id: meilisearch-facet-search-flow
    namespace: company.team

    variables:
      index: movies
      facetQuery: fiction
      facetName: genre
      host: http://172.18.0.3:7700/

    tasks:
      - id: facet_search_documents
        type: io.kestra.plugin.meilisearch.FacetSearch
        index: {{ vars.index }}
        facetQuery: {{ vars.facetQuery }}
        facetName: {{ vars.facetName }}
        filters:
            - "rating > 3"
        url: "{{ vars.host }}"
        key: "MASTER_KEY"

      - id: to_json
        type: io.kestra.plugin.serdes.json.IonToJson
        from: "{{ outputs.search_documents.uri }}"

facetName string required

Name of the facet you want to perform a search on (ex: facetName: "genre" on a film collection)

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
index string required

Index of the collection you want to search in

key string required
type const: "io.kestra.plugin.meilisearch.FacetSearch" required
Constant: "io.kestra.plugin.meilisearch.FacetSearch"
url string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
facetQuery string

Query that will be used on the specified facetName

Default value is : --- ""

Default: ""
filters string[]

Additional filters to apply to your facet search

Default value is : "[]"

Default: "[]"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.meilisearch.Search object

Perform a basic search query on a Meilisearch database with a specific query and return the results in an .ion file.

##### Examples

    id: meilisearch-search-flow
    namespace: company.team

    variables:
      index: movies
      query: "Lord of the Rings"
      host: http://172.18.0.3:7700/

    tasks:
      - id: search_documents
        type: io.kestra.plugin.meilisearch.Search
        index: {{ vars.index }}
        query: {{ vars.query }}
        url: "{{ vars.host }}"
        key: "MASTER_KEY"

      - id: to_json
        type: io.kestra.plugin.serdes.json.IonToJson
        from: "{{ outputs.search_documents.uri }}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
key string required
type const: "io.kestra.plugin.meilisearch.Search" required
Constant: "io.kestra.plugin.meilisearch.Search"
url string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
index string

Index of the collection you want to perform a search on

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
query string

Query performed to search on a specific collection

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.minio.Copy object
Examples
id: minio_copy
namespace: company.team

tasks:
  - id: copy
    type: io.kestra.plugin.minio.Copy
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    from:
      bucket: "my-bucket"
      key: "path/to/file"
    to:
      bucket: "my-bucket2"
      key: "path/to/file2"

Copy file in an S3-compatible storage — here, Spaces Object Storage from Digital Ocean.

id: s3_compatible_copy
namespace: company.team

tasks:
  - id: copy_file
    type: io.kestra.plugin.minio.Copy
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    endpoint: https://<region>.digitaloceanspaces.com
    from:
      bucket: "my-bucket"
      key: "path/to/file"
    to:
      bucket: "my-bucket2"
      key: "path/to/file2"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.minio.Copy" required
Constant: "io.kestra.plugin.minio.Copy"
accessKeyId string
allowFailure boolean

Default value is : false

Default: false
bucket string
delete boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
endpoint string
from
All of: io.kestra.plugin.minio.Copy-CopyObjectFrom object, The source bucket and key.
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string
timeout string
format=duration
to
All of: io.kestra.plugin.minio.Copy-CopyObject object, The destination bucket and key.
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.minio.Copy-CopyObject object
bucket string
key string
io.kestra.plugin.minio.Copy-CopyObjectFrom object
bucket string
key string
versionId string
io.kestra.plugin.minio.CreateBucket object
Examples

Create a new bucket with some options

id: minio_create_bucket
namespace: company.team

tasks:
  - id: create_bucket
    type: io.kestra.plugin.minio.CreateBucket
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"

Create a new bucket on an S3-compatible storage — here, Spaces Object Storage from Digital Ocean.

id: s3_compatible_bucket
namespace: company.team

tasks:
  - id: create_bucket
    type: io.kestra.plugin.minio.CreateBucket
    accessKeyId: "<access_key>"
    secretKeyId: "<secret_key>"
    endpoint: https://<region>.digitaloceanspaces.com  #example region: nyc3, tor1
    bucket: "kestra-test-bucket"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.minio.CreateBucket" required
Constant: "io.kestra.plugin.minio.CreateBucket"
accessKeyId string
allowFailure boolean

Default value is : false

Default: false
bucket string
description string
disabled boolean

Default value is : false

Default: false
endpoint string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
objectLockEnabledForBucket boolean
region string
secretKeyId string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.minio.Delete object
Examples
id: minio_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.minio.Delete
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"
    key: "path/to/file"

Delete file from an S3-compatible storage — here, Spaces Object Storage from Digital Ocean.

id: s3_compatible_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.minio.Delete
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    endpoint: https://<region>.digitaloceanspaces.com
    bucket: "kestra-test-bucket"
    key: "path/to/file"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.minio.Delete" required
Constant: "io.kestra.plugin.minio.Delete"
accessKeyId string
allowFailure boolean

Default value is : false

Default: false
bucket string
bypassGovernanceRetention boolean
description string
disabled boolean

Default value is : false

Default: false
endpoint string
key string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.minio.DeleteList object
Examples
id: minio_delete_objects
namespace: company.team

tasks:
  - id: delete_objects
    type: io.kestra.plugin.minio.DeleteList
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"
    prefix: "sub-dir"

Delete files from an S3-compatible storage — here, Spaces Object Storage from Digital Ocean.

id: s3_compatible_delete_objects
namespace: company.team

tasks:
  - id: delete_objects
    type: io.kestra.plugin.minio.DeleteList
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    endpoint: https://<region>.digitaloceanspaces.com
    bucket: "kestra-test-bucket"
    prefix: "sub-dir"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.minio.DeleteList" required
Constant: "io.kestra.plugin.minio.DeleteList"
accessKeyId string
allowFailure boolean

Default value is : false

Default: false
bucket string
concurrent integer
min=2
delimiter string
description string
disabled boolean

Default value is : false

Default: false
endpoint string
errorOnEmpty boolean

Default value is : false

Default: false
filter string

Default value is : BOTH

Default: "BOTH"
Values: "FILES" "DIRECTORY" "BOTH"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
marker string

Start listing after this specified key. Marker can be any key in the bucket.

maxKeys integer

By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

Default value is : 1000

Default: 1000
prefix string
regexp string

ex: regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

region string
secretKeyId string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.minio.Download object
Examples
id: minio_download
namespace: company.team

tasks:
  - id: download_from_storage
    type: io.kestra.plugin.minio.Download
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"
    key: "path/to/file"

Download file from an S3-compatible storage — here, Spaces Object Storage from Digital Ocean.

id: s3_compatible_download
namespace: company.team

tasks:
  - id: download_from_storage
    type: io.kestra.plugin.minio.Download
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    endpoint: https://<region>.digitaloceanspaces.com
    bucket: "kestra-test-bucket"
    key: "data/orders.csv"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.minio.Download" required
Constant: "io.kestra.plugin.minio.Download"
accessKeyId string
allowFailure boolean

Default value is : false

Default: false
bucket string
description string
disabled boolean

Default value is : false

Default: false
endpoint string
key string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
region string
secretKeyId string
timeout string
format=duration
versionId string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.minio.Downloads object
Examples
id: minio_downloads
namespace: company.team

tasks:
  - id: downloads
    type: io.kestra.plugin.minio.Downloads
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"
    prefix: "sub-dir"
    action: "DELETE"

Download files from an S3-compatible storage — here, Spaces Object Storage from Digital Ocean.

id: s3_compatible_downloads
namespace: company.team

tasks:
  - id: downloads
    type: io.kestra.plugin.minio.Downloads
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    endpoint: https://<region>.digitaloceanspaces.com
    bucket: "kestra-test-bucket"
    prefix: "data/orders"
    action: "DELETE"

action string required
Values: "MOVE" "DELETE" "NONE"
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.minio.Downloads" required
Constant: "io.kestra.plugin.minio.Downloads"
accessKeyId string
allowFailure boolean

Default value is : false

Default: false
bucket string
delimiter string
description string
disabled boolean

Default value is : false

Default: false
endpoint string
filter string

Default value is : BOTH

Default: "BOTH"
Values: "FILES" "DIRECTORY" "BOTH"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
marker string

Start listing after this specified key. Marker can be any key in the bucket.

maxKeys integer

By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

Default value is : 1000

Default: 1000
moveTo
All of: io.kestra.plugin.minio.Copy-CopyObject object, The destination bucket and key for `MOVE` action.
prefix string
regexp string

ex: regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

region string
secretKeyId string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.minio.List object
Examples
id: minio_list
namespace: company.team

tasks:
  - id: list_objects
    type: io.kestra.plugin.minio.List
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"
    prefix: "sub-dir"

List files from an S3-compatible storage — here, Spaces Object Storage from Digital Ocean.

id: s3_compatible_list
namespace: company.team

tasks:
  - id: list_objects
    type: io.kestra.plugin.minio.List
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    endpoint: https://<region>.digitaloceanspaces.com
    bucket: "kestra-test-bucket"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.minio.List" required
Constant: "io.kestra.plugin.minio.List"
accessKeyId string
allowFailure boolean

Default value is : false

Default: false
bucket string
delimiter string
description string
disabled boolean

Default value is : false

Default: false
endpoint string
filter string

Default value is : BOTH

Default: "BOTH"
Values: "FILES" "DIRECTORY" "BOTH"
includeVersions boolean

Default value is : true

Default: true
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
marker string

Start listing after this specified key. Marker can be any key in the bucket.

maxKeys integer

By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

Default value is : 1000

Default: 1000
prefix string
recursive boolean

Default value is : true

Default: true
regexp string

ex: regExp: .* to match all files; regExp: .*2020-01-0.\\.csv to match files between 01 and 09 of January ending with .csv

region string
secretKeyId string
startAfter string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.minio.Trigger object

At every interval, this trigger will list a bucket. You can search for all files in a bucket or a directory in from, or you can filter the files with a regExp. The detection is atomic: internally we do a list and interact only with the files listed. Once a file is detected, we download it to internal storage and process it with the declared action in order to move or delete the file from the bucket (to avoid double detection on the next poll).

##### Examples

Wait for a list of files on a bucket and iterate through the files.

id: minio_listen
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value }}"
    value: "{{ trigger.objects | jq('.[].uri') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.minio.Trigger
    interval: "PT5M"
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"
    prefix: "sub-dir"
    action: MOVE
    moveTo: 
      key: archive

Wait for a list of files on a bucket and iterate through the files. Delete files manually after processing to prevent infinite triggering.

id: minio_listen
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value }}"
      - id: delete
        type: io.kestra.plugin.minio.Delete
        accessKeyId: "<access-key>"
        secretKeyId: "<secret-key>"
        region: "eu-central-1"
        bucket: "my-bucket"
        key: "{{ taskrun.value }}"
    value: "{{ trigger.objects | jq('.[].key') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.minio.Trigger
    interval: "PT5M"
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    bucket: "my-bucket"
    prefix: "sub-dir"
    action: NONE

Wait for a list of files on a bucket on an S3-compatible storage — here, Spaces Object Storage from Digital Ocean. Iterate through those files, and move them to another folder.

id: trigger_on_s3_compatible_storage
namespace: company.team
tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ taskrun.value }}"
    value: "{{ trigger.objects | jq('.[].uri') }}"

triggers:
  - id: watch
    type: io.kestra.plugin.minio.Trigger
    interval: "PT5M"
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    endpoint: https://<region>.digitaloceanspaces.com
    bucket: "kestra-test-bucket"
    prefix: "sub-dir"
    action: MOVE
    moveTo:
      key: archive

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.minio.Trigger" required
Constant: "io.kestra.plugin.minio.Trigger"
accessKeyId string
action string
Values: "MOVE" "DELETE" "NONE"
bucket string
conditions array
delimiter string
description string
disabled boolean

Default value is : false

Default: false
endpoint string
filter string

Default value is : BOTH

Default: "BOTH"
Values: "FILES" "DIRECTORY" "BOTH"
interval string

The interval between 2 different polls of the schedule; this can avoid overloading the remote system with too many calls. For most of the triggers that depend on external systems, the minimal interval should be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
marker string
maxKeys integer

Default value is : 1000

Default: 1000
moveTo object
2 nested properties
bucket string
key string
prefix string
regexp string
region string
secretKeyId string
stopAfter string[]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.minio.Upload object
Examples
id: minio_upload
namespace: company.team

inputs:
  id: file
  type: FILE

tasks:
  - id: upload_to_storage
    type: io.kestra.plugin.minio.Upload
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    region: "eu-central-1"
    from: "{{ inputs.file }}"
    bucket: "my-bucket"
    key: "path/to/file"

Upload file to an S3-compatible storage — here, Spaces Object Storage from Digital Ocean.

id: s3_compatible_upload
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/orders.csv
  - id: upload_to_storage
    type: io.kestra.plugin.minio.Upload
    accessKeyId: "<access-key>"
    secretKeyId: "<secret-key>"
    endpoint: https://<region>.digitaloceanspaces.com  #example regions: nyc3, tor1
    bucket: "kestra-test-bucket"
    from: "{{ outputs.http_download.uri }}"
    key: "data/orders.csv"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.minio.Upload" required
Constant: "io.kestra.plugin.minio.Upload"
accessKeyId string
allowFailure boolean

Default value is : false

Default: false
bucket string
contentType string
description string
disabled boolean

Default value is : false

Default: false
endpoint string

Can be a single file, a list of files or json array.

key string

a full key (with filename) or the directory path if from is multiple files.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
metadata object
region string
secretKeyId string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.modal.cli.ModalCLI object
Examples

Execute a Python script on a GPU-powered instance in the cloud using Modal. Make sure to add the script that you want to orchestrate as a Namespace File in the Editor and point to it in the commands section.

id: modal
namespace: company.team

tasks:
  - id: modal_cli
    type: io.kestra.plugin.modal.cli.ModalCLI
    namespaceFiles:
      enabled: true
    commands:
      - modal run scripts/gpu.py
    env:
      MODAL_TOKEN_ID: "{{ secret('MODAL_TOKEN_ID') }}"
      MODAL_TOKEN_SECRET: "{{ secret('MODAL_TOKEN_SECRET') }}"

Execute a Python script from Git on a cloud VM using Modal.

id: modal_git
namespace: company.team

tasks:
  - id: repository
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone
        type: io.kestra.plugin.git.Clone
        branch: main
        url: https://github.com/kestra-io/scripts

      - id: modal_cli
        type: io.kestra.plugin.modal.cli.ModalCLI
        commands:
          - modal run modal/getting_started.py
        env:
          MODAL_TOKEN_ID: "{{ secret('MODAL_TOKEN_ID') }}"
          MODAL_TOKEN_SECRET: "{{ secret('MODAL_TOKEN_SECRET') }}"

commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.modal.cli.ModalCLI" required
Constant: "io.kestra.plugin.modal.cli.ModalCLI"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : ghcr.io/kestra-io/modal

Default: "ghcr.io/kestra-io/modal"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
env Record<string, string>
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.mongodb.Bulk object

Here are the sample file contents that can be provided as input to Bulk task:

{ "insertOne" : {"firstName": "John", "lastName": "Doe", "city": "Paris"}}
{ "insertOne" : {"firstName": "Ravi", "lastName": "Singh", "city": "Mumbai"}}
{ "deleteMany": {"filter": {"city": "Bengaluru"}}}
Examples
id: mongodb_bulk
namespace: company.team

inputs:
  - id: myfile
    type: FILE

tasks:
  - id: bulk
    type: io.kestra.plugin.mongodb.Bulk
    connection:
      uri: "mongodb://root:example@localhost:27017/?authSource=admin"
    database: "my_database"
    collection: "my_collection"
    from: "{{ inputs.myfile }}"

collection string required
connection required
All of: io.kestra.plugin.mongodb.MongoDbConnection object, MongoDB connection properties.
database string required
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.mongodb.Bulk" required
Constant: "io.kestra.plugin.mongodb.Bulk"
allowFailure boolean

Default value is : false

Default: false
chunk integer

Default value is : 1000

Default: 1000
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.mongodb.Delete object
Examples
id: mongodb_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.mongodb.Delete
    connection:
      uri: "mongodb://root:example@localhost:27017/?authSource=admin"
    database: "my_database"
    collection: "my_collection"
    operation: "DELETE_ONE"
    filter:
      _id:
        $oid: 60930c39a982931c20ef6cd6

collection string required
connection required
All of: io.kestra.plugin.mongodb.MongoDbConnection object, MongoDB connection properties.
database string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.mongodb.Delete" required
Constant: "io.kestra.plugin.mongodb.Delete"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
filter

Can be a BSON string, or a map.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
operation string

Default value is : DELETE_ONE

Default: "DELETE_ONE"
Values: "DELETE_ONE" "DELETE_MANY"
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.mongodb.Find object
Examples
id: mongodb_find
namespace: company.team

tasks:
  - id: find
    type: io.kestra.plugin.mongodb.Find
    connection:
      uri: "mongodb://root:example@localhost:27017/?authSource=admin"
    database: "my_database"
    collection: "my_collection"
    filter:
      _id:
        $oid: 60930c39a982931c20ef6cd6

collection string required
connection required
All of: io.kestra.plugin.mongodb.MongoDbConnection object, MongoDB connection properties.
database string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.mongodb.Find" required
Constant: "io.kestra.plugin.mongodb.Find"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
filter

Can be a BSON string, or a map.

limit integer
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projection

Can be a BSON string, or a map.

skip integer
sort

Can be a BSON string, or a map.

store boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.mongodb.InsertOne object
Examples

Insert a document with a map.

id: mongodb_insertone
namespace: company.team

tasks:
  - id: insertone
    type: io.kestra.plugin.mongodb.InsertOne
    connection:
      uri: "mongodb://root:example@localhost:27017/?authSource=admin"
    database: "my_database"
    collection: "my_collection"
    document:
      _id:
        $oid: 60930c39a982931c20ef6cd6
      name: "John Doe"
      city: "Paris"

Insert a document from a JSON string.

id: mongodb_insertone
namespace: company.team

tasks:
  - id: insertone
    type: io.kestra.plugin.mongodb.InsertOne
    connection:
      uri: "mongodb://root:example@localhost:27017/?authSource=admin"
    database: "my_database"
    collection: "my_collection"
    document: "{{ outputs.task_id.data | json }}"

collection string required
connection required
All of: io.kestra.plugin.mongodb.MongoDbConnection object, MongoDB connection properties.
database string required
document required

Can be a BSON string, or a map.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.mongodb.InsertOne" required
Constant: "io.kestra.plugin.mongodb.InsertOne"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.mongodb.Load object
Examples
id: mongodb_load
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: load
    type: io.kestra.plugin.mongodb.Load
    connection:
      uri: "mongodb://root:example@localhost:27017/?authSource=admin"
    database: "my_database"
    collection: "my_collection"
    from: "{{ inputs.file }}"

collection string required
connection required
All of: io.kestra.plugin.mongodb.MongoDbConnection object, MongoDB connection properties.
database string required
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.mongodb.Load" required
Constant: "io.kestra.plugin.mongodb.Load"
allowFailure boolean

Default value is : false

Default: false
chunk integer

Default value is : 1000

Default: 1000
description string
disabled boolean

Default value is : false

Default: false
idKey string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
removeIdKey boolean

Default value is : true

Default: true
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.mongodb.MongoDbConnection object
uri string required

URL format like mongodb://mongodb0.example.com:27017

minLength=1
io.kestra.plugin.mongodb.Trigger object
Examples

Wait for a MongoDB query to return results, and then iterate through rows.

id: mongodb_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.mongodb.Trigger
    interval: "PT5M"
    connection:
      uri: mongodb://root:example@localhost:27017/?authSource=admin
    database: samples
    collection: books
    filter:
      pageCount:
        $gte: 50
    sort:
      pageCount: -1
    projection:
      title: 1
      publishedDate: 1
      pageCount: 1

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.mongodb.Trigger" required
Constant: "io.kestra.plugin.mongodb.Trigger"
collection string
conditions array
connection object
1 nested properties
uri string required

URL format like mongodb://mongodb0.example.com:27017

minLength=1
database string
description string
disabled boolean

Default value is : false

Default: false
filter
interval string

The interval between 2 different polls of the schedule; this can avoid overloading the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
limit integer
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
projection
skip integer
sort
stopAfter string[]
store boolean

Default value is : false

Default: false
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.mongodb.Update object
Examples

Replace a document.

id: mongodb_update
namespace: company.team

tasks:
  - id: update
    type: io.kestra.plugin.mongodb.Update
    connection:
      uri: "mongodb://root:example@localhost:27017/?authSource=admin"
    database: "my_database"
    collection: "my_collection"
    operation: "REPLACE_ONE"
    document:
      _id:
        $oid: 60930c39a982931c20ef6cd6
      name: "John Doe"
      city: "Paris"
    filter:
      _id:
        $oid: 60930c39a982931c20ef6cd6

Update a document.

id: mongodb_update
namespace: company.team

tasks:
  - id: update
    type: io.kestra.plugin.mongodb.Update
    connection:
      uri: "mongodb://root:example@localhost:27017/?authSource=admin"
    database: "my_database"
    collection: "my_collection"
    filter:
      _id:
        $oid: 60930c39a982931c20ef6cd6
    document: '{"$set": { "tags": ["blue", "green", "red"]}}'

collection string required
connection required
All of: io.kestra.plugin.mongodb.MongoDbConnection object, MongoDB connection properties.
database string required
document required

Can be a BSON string, or a map.

filter required

Can be a BSON string, or a map.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.mongodb.Update" required
Constant: "io.kestra.plugin.mongodb.Update"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
operation string

Default value is : UPDATE_ONE

Default: "UPDATE_ONE"
Values: "REPLACE_ONE" "UPDATE_ONE" "UPDATE_MANY"
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.mqtt.Publish object
Examples
id: mqtt_publish
namespace: company.team

tasks:
  - id: publish
    type: io.kestra.plugin.mqtt.Publish
    server: tcp://localhost:1883
    clientId: kestraProducer
    topic: kestra/sensors/cpu
    serdeType: JSON
    retain: true
    from:
      type: "sensors"
      value: 1.23

    id: mqtt_publish
    namespace: company.team

    tasks:
      - id: publish
        type: io.kestra.plugin.mqtt.Publish
        server: ssl://localhost:8883
        clientId: kestraProducer
        topic: kestra/sensors/cpu
        crt: /home/path/to/ca.crt
        serdeType: JSON
        retain: true
        from:
          type: "sensors"
          value: 1.23

clientId string required

A client identifier clientId must be specified and be less than 65535 characters. It must be unique across all clients connecting to the same server. The clientId is used by the server to store data related to the client, hence it is important that the clientId remain the same when connecting to a server if durable subscriptions or reliable messaging are required. As the client identifier is used by the server to identify a client when it reconnects, the client must use the same identifier between connections if durable subscriptions or reliable delivery of messages is required.

from string | array | object required

Can be an internal storage uri, a map or a list.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
serdeType string required
Values: "STRING" "JSON" "BYTES"
server string required

The serverURI parameter is typically used with the clientId parameter to form a key. The key is used to store and reference messages while they are being delivered. The address of the server to connect to is specified as a URI. Two types of connection are supported tcp:// for a TCP connection and ssl:// for a TCP connection secured by SSL/TLS. For example:

  • tcp://localhost:1883
  • ssl://localhost:8883 If the port is not specified, it will default to 1883 for tcp:// URIs, and 8883 for ssl:// URIs.
topic string required
type const: "io.kestra.plugin.mqtt.Publish" required
Constant: "io.kestra.plugin.mqtt.Publish"
allowFailure boolean

Default value is : false

Default: false
authMethod string

Only available if version = V5 If set, this value contains the name of the authentication method to be used for extended authentication. If null, extended authentication is not performed.

connectionTimeout string

This value defines the maximum time interval the client will wait for the network connection to the MQTT server to be established. The default timeout is 30 seconds. A value of 0 disables timeout processing meaning the client will wait until the network connection is made successfully or fails.

format=duration
crt string
description string
disabled boolean

Default value is : false

Default: false
httpsHostnameVerificationEnabled boolean

This value will allow all ca certificate.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
qos integer
  • Quality of Service 0: indicates that a message should be delivered at most once (zero or one times). The message will not be persisted to disk, and will not be acknowledged across the network. This QoS is the fastest, but should only be used for messages which are not valuable - note that if the server cannot process the message (for example, there is an authorization problem), the message will be lost. Also known as "fire and forget".
  • Quality of Service 1: indicates that a message should be delivered at least once (one or more times). The message can only be delivered safely if it can be persisted, so the application must supply a means of persistence using MqttConnectOptions. If a persistence mechanism is not specified, the message will not be delivered in the event of a client failure. The message will be acknowledged across the network.
  • Quality of Service 2: indicates that a message should be delivered once. The message will be persisted to disk, and will be subject to a two-phase acknowledgement across the network. The message can only be delivered safely if it can be persisted, so the application must supply a means of persistence using MqttConnectOptions. If a persistence mechanism is not specified, the message will not be delivered in the event of a client failure. If persistence is not configured, QoS 1 and 2 messages will still be delivered in the event of a network or server problem as the client will hold state in memory. If the MQTT client is shutdown or fails and persistence is not configured then delivery of QoS 1 and 2 messages can not be maintained as client-side state will be lost.

Default value is : 1

Default: 1
retain boolean

Sending a message with retained set to true and with an empty byte array as the payload e.g. null will clear the retained message from the server.

Default value is : false

Default: false
timeout string
format=duration
username string
version string

Default value is : V5

Default: "V5"
Values: "V3" "V5"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.mqtt.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.mqtt.Trigger instead.

Examples

Consume a message from MQTT topics in real-time.

id: mqtt_realtime_trigger
namespace: company.team

tasks:
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.payload }}"

triggers:
  - id: realtime_trigger
    type: io.kestra.plugin.mqtt.RealtimeTrigger
    server: tcp://localhost:1883
    clientId: kestraProducer
    topic:
      - kestra/sensors/cpu
      - kestra/sensors/mem
    serdeType: JSON
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
topic required

Can be a string or a list of strings to consume from multiple topics

type const: "io.kestra.plugin.mqtt.RealtimeTrigger" required
Constant: "io.kestra.plugin.mqtt.RealtimeTrigger"
authMethod string
clientId string
conditions array
connectionTimeout string
format=duration
crt string
description string
disabled boolean

Default value is : false

Default: false
httpsHostnameVerificationEnabled boolean
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
qos integer
  • Quality of Service 0: indicates that a message should be delivered at most once (zero or one times). The message will not be persisted to disk, and will not be acknowledged across the network. This QoS is the fastest, but should only be used for messages which are not valuable - note that if the server cannot process the message (for example, there is an authorization problem), the message will be lost. Also known as "fire and forget".
  • Quality of Service 1: indicates that a message should be delivered at least once (one or more times). The message can only be delivered safely if it can be persisted, so the application must supply a means of persistence using MqttConnectOptions. If a persistence mechanism is not specified, the message will not be delivered in the event of a client failure. The message will be acknowledged across the network.
  • Quality of Service 2: indicates that a message should be delivered once. The message will be persisted to disk, and will be subject to a two-phase acknowledgement across the network. The message can only be delivered safely if it can be persisted, so the application must supply a means of persistence using MqttConnectOptions. If a persistence mechanism is not specified, the message will not be delivered in the event of a client failure. If persistence is not configured, QoS 1 and 2 messages will still be delivered in the event of a network or server problem as the client will hold state in memory. If the MQTT client is shutdown or fails and persistence is not configured then delivery of QoS 1 and 2 messages can not be maintained as client-side state will be lost.

Default value is : 1

Default: 1
serdeType string

Default value is : JSON

Default: "JSON"
Values: "STRING" "JSON" "BYTES"
server string
stopAfter string[]
username string
version string

Default value is : V5

Default: "V5"
Values: "V3" "V5"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.mqtt.Subscribe object
Examples
id: mqtt_subscribe
namespace: company.team

tasks:
  - id: subscribe
    type: io.kestra.plugin.mqtt.Subscribe
    server: tcp://localhost:1883
    clientId: kestraProducer
    topic:
      - kestra/sensors/cpu
      - kestra/sensors/mem
    serdeType: JSON
    maxRecords: 10

id: mqtt_subscribe
namespace: company.team

tasks:
  - id: subscribe
    type: io.kestra.plugin.mqtt.Subscribe
    server: ssl://localhost:8883
    clientId: kestraProducer
    topic:
      - kestra/sensors/cpu
      - kestra/sensors/mem
    crt: /home/path/to/ca.crt
    serdeType: JSON
    maxRecords: 10

clientId string required

A client identifier clientId must be specified and be less than 65535 characters. It must be unique across all clients connecting to the same server. The clientId is used by the server to store data related to the client, hence it is important that the clientId remain the same when connecting to a server if durable subscriptions or reliable messaging are required. As the client identifier is used by the server to identify a client when it reconnects, the client must use the same identifier between connections if durable subscriptions or reliable delivery of messages is required.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
server string required

The serverURI parameter is typically used with the clientId parameter to form a key. The key is used to store and reference messages while they are being delivered. The address of the server to connect to is specified as a URI. Two types of connection are supported tcp:// for a TCP connection and ssl:// for a TCP connection secured by SSL/TLS. For example:

  • tcp://localhost:1883
  • ssl://localhost:8883 If the port is not specified, it will default to 1883 for tcp:// URIs, and 8883 for ssl:// URIs.
topic required

Can be a string or a list of strings to consume from multiple topics

type const: "io.kestra.plugin.mqtt.Subscribe" required
Constant: "io.kestra.plugin.mqtt.Subscribe"
allowFailure boolean

Default value is : false

Default: false
authMethod string

Only available if version = V5 If set, this value contains the name of the authentication method to be used for extended authentication. If null, extended authentication is not performed.

connectionTimeout string

This value defines the maximum time interval the client will wait for the network connection to the MQTT server to be established. The default timeout is 30 seconds. A value of 0 disables timeout processing meaning the client will wait until the network connection is made successfully or fails.

format=duration
crt string
description string
disabled boolean

Default value is : false

Default: false
httpsHostnameVerificationEnabled boolean

This value will allow all ca certificate.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second

password string
qos integer
  • Quality of Service 0: indicates that a message should be delivered at most once (zero or one times). The message will not be persisted to disk, and will not be acknowledged across the network. This QoS is the fastest, but should only be used for messages which are not valuable - note that if the server cannot process the message (for example, there is an authorization problem), the message will be lost. Also known as "fire and forget".
  • Quality of Service 1: indicates that a message should be delivered at least once (one or more times). The message can only be delivered safely if it can be persisted, so the application must supply a means of persistence using MqttConnectOptions. If a persistence mechanism is not specified, the message will not be delivered in the event of a client failure. The message will be acknowledged across the network.
  • Quality of Service 2: indicates that a message should be delivered once. The message will be persisted to disk, and will be subject to a two-phase acknowledgement across the network. The message can only be delivered safely if it can be persisted, so the application must supply a means of persistence using MqttConnectOptions. If a persistence mechanism is not specified, the message will not be delivered in the event of a client failure. If persistence is not configured, QoS 1 and 2 messages will still be delivered in the event of a network or server problem as the client will hold state in memory. If the MQTT client is shutdown or fails and persistence is not configured then delivery of QoS 1 and 2 messages can not be maintained as client-side state will be lost.

Default value is : 1

Default: 1
serdeType string

Default value is : JSON

Default: "JSON"
Values: "STRING" "JSON" "BYTES"
timeout string
format=duration
username string
version string

Default value is : V5

Default: "V5"
Values: "V3" "V5"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.mqtt.Trigger object

Note that you don't need an extra task to consume the message from the event trigger. The trigger will automatically consume messages and you can retrieve their content in your flow using the {{ trigger.uri }} variable. If you would like to consume each message from MQTT topics in real-time and create one execution per message, you can use the io.kestra.plugin.mqtt.RealtimeTrigger instead.

Examples

    id: mqtt_trigger
    namespace: company.team

    tasks:
      - id: log
        type: io.kestra.plugin.core.log.Log
        message: "{{ trigger.payload }}"

    triggers:
      - id: trigger
        type: io.kestra.plugin.mqtt.Trigger
        server: tcp://localhost:1883
        clientId: kestraProducer
        topic:
          - kestra/sensors/cpu
          - kestra/sensors/mem
        serdeType: JSON
        maxRecords: 10

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
topic required

Can be a string or a list of strings to consume from multiple topics

type const: "io.kestra.plugin.mqtt.Trigger" required
Constant: "io.kestra.plugin.mqtt.Trigger"
authMethod string
clientId string
conditions array
connectionTimeout string
format=duration
crt string
description string
disabled boolean

Default value is : false

Default: false
httpsHostnameVerificationEnabled boolean
interval string

The interval between 2 different polls of the schedule; this can avoid overloading the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second

password string
qos integer
  • Quality of Service 0: indicates that a message should be delivered at most once (zero or one times). The message will not be persisted to disk, and will not be acknowledged across the network. This QoS is the fastest, but should only be used for messages which are not valuable - note that if the server cannot process the message (for example, there is an authorization problem), the message will be lost. Also known as "fire and forget".
  • Quality of Service 1: indicates that a message should be delivered at least once (one or more times). The message can only be delivered safely if it can be persisted, so the application must supply a means of persistence using MqttConnectOptions. If a persistence mechanism is not specified, the message will not be delivered in the event of a client failure. The message will be acknowledged across the network.
  • Quality of Service 2: indicates that a message should be delivered once. The message will be persisted to disk, and will be subject to a two-phase acknowledgement across the network. The message can only be delivered safely if it can be persisted, so the application must supply a means of persistence using MqttConnectOptions. If a persistence mechanism is not specified, the message will not be delivered in the event of a client failure. If persistence is not configured, QoS 1 and 2 messages will still be delivered in the event of a network or server problem as the client will hold state in memory. If the MQTT client is shutdown or fails and persistence is not configured then delivery of QoS 1 and 2 messages can not be maintained as client-side state will be lost.

Default value is : 1

Default: 1
serdeType string

Default value is : JSON

Default: "JSON"
Values: "STRING" "JSON" "BYTES"
server string
stopAfter string[]
username string
version string

Default value is : V5

Default: "V5"
Values: "V3" "V5"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.nats.Consume object

Please note that the server you run it against must have JetStream enabled for it to work. It should also have a stream configured to match the given subject.

Examples

Consume messages from any topic subject matching the kestra.> wildcard, using user password authentication.

id: nats_consume_messages
namespace: company.team

tasks:
  - id: consume
    type: io.kestra.plugin.nats.Consume
    url: nats://localhost:4222
    username: nats_user
    password: nats_password
    subject: kestra.>
    durableId: someDurableId
    pollDuration: PT5S

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
subject string required
minLength=1
type const: "io.kestra.plugin.nats.Consume" required
Constant: "io.kestra.plugin.nats.Consume"
url string required

The format is (nats://)server_url:port. You can also provide a connection token like so: nats://token@server_url:port

minLength=1
allowFailure boolean

Default value is : false

Default: false
batchSize integer

Default value is : 10

Default: 10
min=1
deliverPolicy string

Possible settings are:

  • All: The default policy. The consumer will start receiving from the earliest available message.
  • Last: When first consuming messages, the consumer will start receiving messages with the last message added to the stream, or the last message in the stream that matches the consumer's filter subject if defined.
  • New: When first consuming messages, the consumer will only start receiving messages that were created after the consumer was created.
  • ByStartSequence: When first consuming messages, start at the first message having the sequence number or the next one available.
  • ByStartTime: When first consuming messages, start with messages on or after this time. The consumer is required to specify since which defines this start time.
  • LastPerSubject: When first consuming messages, start with the latest one for each filtered subject currently in the stream.

Default value is : All

Default: "All"
Values: "All" "Last" "New" "ByStartSequence" "ByStartTime" "LastPerSubject"
description string
disabled boolean

Default value is : false

Default: false
durableId string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second

format=duration
maxRecords integer
password string
pollDuration string

If no messages are available, define the max duration to wait for new messages

Default value is : 2.000000000

Default: 2.0
format=duration
since string

By default, we consume all messages from the subjects starting from the beginning of logs or depending on the current durable id position. You can also provide an arbitrary start time to get all messages since this date for a new durable id. Note that if you don't provide a durable id, you will retrieve all messages starting from this date even after subsequent usage of this task. Must be a valid ISO 8601 date.

timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.nats.Produce object
Examples

Produce a single message to kestra.publish subject, using user password authentication.

id: nats_produce_single_message
namespace: company.team

tasks:
  - id: produce
    type: io.kestra.plugin.nats.Produce
    url: nats://localhost:4222
    username: nats_user
    password: nats_password
    subject: kestra.publish
    from:
      headers:
        someHeaderKey: someHeaderValue
      data: Some message

Produce 2 messages to kestra.publish subject, using user password authentication.

id: nats_produce_two_messages
namespace: company.team

tasks:
  - id: produce
    type: io.kestra.plugin.nats.Produce
    url: nats://localhost:4222
    username: nats_user
    password: nats_password
    subject: kestra.publish
    from:
      - headers:
          someHeaderKey: someHeaderValue
        data: Some message
      - data: Another message

Produce messages (1 / row) from an internal storage file to kestra.publish subject, using user password authentication.

id: nats_produce_messages_from_file
namespace: company.team

tasks:
  - id: produce
    type: io.kestra.plugin.nats.Produce
    url: nats://localhost:4222
    username: nats_user
    password: nats_password
    subject: kestra.publish
    from: "{{ outputs.some_task_with_output_file.uri }}"

from string | array | object required

Can be an internal storage URI, a map, or a list with the following format: headers, data

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
subject string required
minLength=1
type const: "io.kestra.plugin.nats.Produce" required
Constant: "io.kestra.plugin.nats.Produce"
url string required

The format is (nats://)server_url:port. You can also provide a connection token like so: nats://token@server_url:port

minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.nats.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.nats.Trigger instead.

Examples

Subscribe to a NATS subject, getting every message from the beginning of the subject on first trigger execution.

id: nats
namespace: company.team

tasks:
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.data }}"

triggers:
  - id: watch
    type: io.kestra.plugin.nats.RealtimeTrigger
    url: nats://localhost:4222
    username: nats_user
    password: nats_password
    subject: kestra.trigger
    durableId: natsTrigger
    deliverPolicy: All

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
subject string required
minLength=1
type const: "io.kestra.plugin.nats.RealtimeTrigger" required
Constant: "io.kestra.plugin.nats.RealtimeTrigger"
url string required

The format is (nats://)server_url:port. You can also provide a connection token like so: nats://token@server_url:port

minLength=1
batchSize integer

Default value is : 10

Default: 10
min=1
conditions array
deliverPolicy string

Possible settings are:

  • All: The default policy. The consumer will start receiving from the earliest available message.
  • Last: When first consuming messages, the consumer will start receiving messages with the last message added to the stream, or the last message in the stream that matches the consumer's filter subject if defined.
  • New: When first consuming messages, the consumer will only start receiving messages that were created after the consumer was created.
  • ByStartSequence: When first consuming messages, start at the first message having the sequence number or the next one available.
  • ByStartTime: When first consuming messages, start with messages on or after this time. The consumer is required to specify since which defines this start time.
  • LastPerSubject: When first consuming messages, start with the latest one for each filtered subject currently in the stream.

Default value is : All

Default: "All"
Values: "All" "Last" "New" "ByStartSequence" "ByStartTime" "LastPerSubject"
description string
disabled boolean

Default value is : false

Default: false
durableId string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
since string

By default, we consume all messages from the subjects starting from the beginning of logs or depending on the current durable id position. You can also provide an arbitrary start time to get all messages since this date for a new durable id. Note that if you don't provide a durable id, you will retrieve all messages starting from this date even after subsequent usage of this task. Must be a valid ISO 8601 date.

stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.nats.Trigger object

If you would like to consume each message from a NATS subject in real-time and create one execution per message, you can use the io.kestra.plugin.nats.RealtimeTrigger instead.

Examples

Subscribe to a NATS subject, getting every message from the beginning of the subject on first trigger execution.

id: nats
namespace: company.team

tasks:
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.data }}"

triggers:
  - id: watch
    type: io.kestra.plugin.nats.Trigger
    url: nats://localhost:4222
    username: nats_user
    password: nats_password
    subject: kestra.trigger
    durableId: natsTrigger
    deliverPolicy: All
    maxRecords: 1

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
subject string required
minLength=1
type const: "io.kestra.plugin.nats.Trigger" required
Constant: "io.kestra.plugin.nats.Trigger"
url string required

The format is (nats://)server_url:port. You can also provide a connection token like so: nats://token@server_url:port

minLength=1
batchSize integer

Default value is : 10

Default: 10
min=1
conditions array
deliverPolicy string

Possible settings are:

  • All: The default policy. The consumer will start receiving from the earliest available message.
  • Last: When first consuming messages, the consumer will start receiving messages with the last message added to the stream, or the last message in the stream that matches the consumer's filter subject if defined.
  • New: When first consuming messages, the consumer will only start receiving messages that were created after the consumer was created.
  • ByStartSequence: When first consuming messages, start at the first message having the sequence number or the next one available.
  • ByStartTime: When first consuming messages, start with messages on or after this time. The consumer is required to specify since which defines this start time.
  • LastPerSubject: When first consuming messages, start with the latest one for each filtered subject currently in the stream.

Default value is : All

Default: "All"
Values: "All" "Last" "New" "ByStartSequence" "ByStartTime" "LastPerSubject"
description string
disabled boolean

Default value is : false

Default: false
durableId string
interval string

The interval between two consecutive polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval should be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second.

format=duration
maxRecords integer
password string
pollDuration string

If no messages are available, define the max duration to wait for new messages

Default value is : 2.000000000

Default: 2.0
format=duration
since string

By default, we consume all messages from the subjects starting from the beginning of logs or depending on the current durable id position. You can also provide an arbitrary start time to get all messages since this date for a new durable id. Note that if you don't provide a durable id, you will retrieve all messages starting from this date even after subsequent usage of this task. Must be a valid ISO 8601 date.

stopAfter string[]
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.nats.kv.CreateBucket object
Examples

Creates a new Key/Value bucket, with all required properties.

id: nats_kv_create_bucket
namespace: company.team

tasks:
  - id: create_bucket
    type: io.kestra.plugin.nats.kv.CreateBucket
    url: nats://localhost:4222
    username: nats_user
    password: nats_passwd
    name: my_bucket

Creates a new Key/Value bucket.

id: nats_kv_create_bucket
namespace: company.team

tasks:
  - id: create_bucket
    type: io.kestra.plugin.nats.kv.CreateBucket
    url: nats://localhost:4222
    username: nats_user
    password: nats_passwd
    name: my_bucket
    description: my bucket for special purposes
    historyPerKey: 2
    bucketSize: 1024
    valueSize: 1024
    metadata: {"key1":"value1","key2":"value2"}

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
name string required
minLength=1
type const: "io.kestra.plugin.nats.kv.CreateBucket" required
Constant: "io.kestra.plugin.nats.kv.CreateBucket"
url string required

The format is (nats://)server_url:port. You can also provide a connection token like so: nats://token@server_url:port

minLength=1
allowFailure boolean

Default value is : false

Default: false
bucketSize integer
description string
disabled boolean

Default value is : false

Default: false
historyPerKey integer

Default value is : 1

Default: 1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
metadata object
password string
timeout string
format=duration
username string
valueSize integer
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.nats.kv.Delete object
Examples
id: nats_kv_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.nats.kv.Delete
    url: nats://localhost:4222
    username: nats_user
    password: nats_passwd
    bucketName: my_bucket
    keys:
      - key1
      - key2

bucketName string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
keys string[] required
type const: "io.kestra.plugin.nats.kv.Delete" required
Constant: "io.kestra.plugin.nats.kv.Delete"
url string required

The format is (nats://)server_url:port. You can also provide a connection token like so: nats://token@server_url:port

minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.nats.kv.Get object
Examples

Gets a value from a NATS Key/Value bucket by keys.

id: nats_kv_get
namespace: company.team

tasks:
  - id: get
    type: io.kestra.plugin.nats.kv.Get
    url: nats://localhost:4222
    username: nats_user
    password: nats_passwd
    bucketName: my_bucket
    keys:
      - key1
      - key2

Gets a value from a NATS Key/Value bucket by keys with revisions.

id: nats_kv_get
namespace: company.team

tasks:
  - id: get
    type: io.kestra.plugin.nats.kv.Get
    url: nats://localhost:4222
    username: nats_user
    password: nats_passwd
    bucketName: my_bucket
    keyRevisions:
      - key1: 1
      - key2: 3

bucketName string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
keys string[] required
type const: "io.kestra.plugin.nats.kv.Get" required
Constant: "io.kestra.plugin.nats.kv.Get"
url string required

The format is (nats://)server_url:port. You can also provide a connection token like so: nats://token@server_url:port

minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
keyRevisions object
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.nats.kv.Put object
Examples
id: nats_kv_put
namespace: company.team

tasks:
  - id: put
    type: io.kestra.plugin.nats.kv.Put
    url: nats://localhost:4222
    username: nats_user
    password: nats_passwd
    bucketName: my_bucket
    values:
      - key1: value1
      - key2: value2
      - key3:
        - subKey1: some other value

bucketName string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.nats.kv.Put" required
Constant: "io.kestra.plugin.nats.kv.Put"
url string required

The format is (nats://)server_url:port. You can also provide a connection token like so: nats://token@server_url:port

minLength=1
values object required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.neo4j.Batch object
Examples
id: neo4j_batch
namespace: company.team

tasks:
  - id: batch
    type: io.kestra.plugin.neo4j.Batch
    url: "{{ url }}"
    username: "{{ username }}"
    password: "{{ password }}"
    query: |
       UNWIND $props AS properties
       MERGE (y:Year {year: properties.year})
       MERGE (y)<-[:IN]-(e:Event {id: properties.id})

       RETURN e.id AS x ORDER BY x

    from: "{{ outputs.previous_task_id.uri }}"
    chunk: 1000

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
query string required

The query must contain the line: "UNWIND $props AS X", where $props is the variable into which the source data for the batch is injected.

type const: "io.kestra.plugin.neo4j.Batch" required
Constant: "io.kestra.plugin.neo4j.Batch"
allowFailure boolean

Default value is : false

Default: false
bearerToken string
chunk integer

Default value is : 1000

Default: 1000
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string

If not specified, won't use basic auth

timeout string
format=duration
url string

The URL can either be in HTTP or Bolt format

username string

If not specified, won't use basic auth

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.neo4j.Query object
Examples
    id: neo4j_query
    namespace: company.team

    tasks:
      - id: query
        type: io.kestra.plugin.neo4j.Query
        url: "{{ url }}"
        username: "{{ username }}"
        password: "{{ password }}"
        query: |
            MATCH (p:Person)
            RETURN p
        storeType: FETCH

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.neo4j.Query" required
Constant: "io.kestra.plugin.neo4j.Query"
allowFailure boolean

Default value is : false

Default: false
bearerToken string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string

If not specified, won't use basic auth

query string
storeType string

FETCHONE outputs the first row, FETCH outputs all rows, STORE stores all rows in a file, NONE does nothing.

Default value is : NONE

Default: "NONE"
Values: "STORE" "FETCH" "FETCHONE" "NONE"
timeout string
format=duration
url string

The URL can either be in HTTP or Bolt format

username string

If not specified, won't use basic auth

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.discord.DiscordExecution object

The message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger. Don't use this notification task in errors tasks. Instead, for errors tasks, use the DiscordIncomingWebhook task.

Examples

Send a Discord notification on a failed flow execution

id: failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.discord.DiscordExecution
    url: "{{ secret('DISCORD_WEBHOOK') }}" # format: https://hooks.discord.com/services/xzy/xyz/xyz
    username: "MyUsername"
    embedList:
        - title: "Discord Notification"
          color:
              - 255
              - 255
              - 255
    executionId: "{{trigger.executionId}}"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.discord.DiscordExecution" required
Constant: "io.kestra.plugin.notifications.discord.DiscordExecution"
url string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
avatarUrl string
content string
customFields object
customMessage string
description string
disabled boolean

Default value is : false

Default: false
embedList array
executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
templateRenderMap object
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.discord.DiscordIncomingWebhook object

Add this task to a list of errors tasks to implement custom flow-level failure notifications. Check the Discord documentation for more details.

Examples

Send a Discord notification on a failed flow execution

id: unreliable_flow
namespace: company.team

tasks:
  - id: fail
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - exit 1

errors:
  - id: alert_on_failure
    type: io.kestra.plugin.notifications.discord.DiscordIncomingWebhook
    url: "{{ secret('DISCORD_WEBHOOK') }}" # https://discord.com/api/webhooks/000000/xxxxxxxxxxx
    payload: |
      {
        "username": "MyUsername",
        "content": "Failure alert for flow {{ flow.namespace }}.{{ flow.id }} with ID {{ execution.id }}",
        "embedList": [{
                "title": "Discord Notification"
            }]
      }

Send a Discord message via incoming webhook

id: discord_incoming_webhook
namespace: company.team

tasks:
  - id: send_discord_message
    type: io.kestra.plugin.notifications.discord.DiscordIncomingWebhook
    url: "{{ secret('DISCORD_WEBHOOK') }}"
    payload: |
      {
        "username": "MyUsername",
        "tts": false,
        "content": "Hello from the workflow {{ flow.id }}",
        "embeds": [
            {
                "title": "Discord Hello",
                "color": 16777215,
                "description": "Namespace: dev
Flow ID: discord
Execution ID: 1p0JVFz24ZVLSK8iJN6hfs
Execution Status: SUCCESS

[Link to the Execution page](http://localhost:8080/ui/executions/dev/discord/1p0JVFz24ZVLSK8iJN6hfs)",
                "footer": {
                    "text": "Succeeded after 00:00:00.385"
                }
            }
        ]
      }

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.discord.DiscordIncomingWebhook" required
Constant: "io.kestra.plugin.notifications.discord.DiscordIncomingWebhook"
url string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.discord.DiscordTemplate-Embed object
authorName string
color integer[]

Example: [255, 255, 255]

description string
footer string
thumbnail string
title string
websiteUrl string
io.kestra.plugin.notifications.google.GoogleChatExecution object

The message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger. Don't use this notification task in errors tasks. Instead, for errors tasks, use the GoogleChatIncomingWebhook task.

Examples

Send a Google Chat notification on a failed flow execution

id: failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.google.GoogleChatExecution
    url: "{{ secret('GOOGLE_WEBHOOK') }}" # format: https://chat.googleapis.com/v1/spaces/xzy/messages
    text: "Google Chat Notification"
    executionId: "{{trigger.executionId}}"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.google.GoogleChatExecution" required
Constant: "io.kestra.plugin.notifications.google.GoogleChatExecution"
url string required

Check the Create an Incoming Webhook documentation for more details.

minLength=1
allowFailure boolean

Default value is : false

Default: false
customFields object
customMessage string
description string
disabled boolean

Default value is : false

Default: false
executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
templateRenderMap object
text string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.google.GoogleChatIncomingWebhook object

Add this task to a list of errors tasks to implement custom flow-level failure notifications. Check the Google documentation for more details.

Examples

Send a Google Chat notification on a failed flow execution

id: unreliable_flow
namespace: company.team

tasks:
  - id: fail
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - exit 1

errors:
  - id: alert_on_failure
    type: io.kestra.plugin.notifications.google.GoogleChatIncomingWebhook
    url: "{{ secret('GOOGLE_WEBHOOK') }}" # https://chat.googleapis.com/v1/spaces/xzy/messages?threadKey=errorThread
    payload: |
      {
        "text": "Google Chat Alert"
      }

Send a Google Chat message via incoming webhook

id: google_incoming_webhook
namespace: company.team

tasks:
  - id: send_google_chat_message
    type: io.kestra.plugin.notifications.google.GoogleChatIncomingWebhook
    url: "{{ secret('GOOGLE_WEBHOOK') }}"
    payload: |
      {
        "text": "Google Chat Hello"
      }

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.google.GoogleChatIncomingWebhook" required
Constant: "io.kestra.plugin.notifications.google.GoogleChatIncomingWebhook"
url string required

Check the Create an Incoming Webhook documentation for more details.

minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.mail.MailExecution object

The message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger, as shown in this example. Don't use this notification task in errors tasks. Instead, for errors tasks, use the MailSend task.

Examples

Send an email notification on a failed flow execution

id: failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.mail.MailExecution
    to: [email protected]
    from: [email protected]
    subject: "The workflow execution {{trigger.executionId}} failed for the flow {{trigger.flowId}} in the namespace {{trigger.namespace}}"
    host: mail.privateemail.com
    port: 465
    username: "{{ secret('EMAIL_USERNAME') }}"
    password: "{{ secret('EMAIL_PASSWORD') }}"
    executionId: "{{ trigger.executionId }}"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.mail.MailExecution" required
Constant: "io.kestra.plugin.notifications.mail.MailExecution"
allowFailure boolean

Default value is : false

Default: false
attachments array

The attachment will be shown in the email client as separate files available for download, or displayed inline if the client supports it (for example, most browsers display PDF's in a popup window)

cc string

Note that each email address must be compliant with the RFC2822 format

customFields object
customMessage string
description string
disabled boolean

Default value is : false

Default: false
embeddedImages array

The provided images are assumed to be of MIME type png, jpg or whatever the email client supports as valid image that can be embedded in HTML content

executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
from string
host string
htmlTextContent string

Both text and HTML can be provided, which will be offered to the email client as alternative content. Email clients that support it will favor HTML over plain text and ignore the text body completely.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
plainTextContent string

Both text and HTML can be provided, which will be offered to the email client as alternative content. Email clients that support it will favor HTML over plain text and ignore the text body completely.

port integer
sessionTimeout integer

It controls the maximum timeout value when sending emails

Default value is : 10000

Default: 10000
subject string
templateRenderMap object
timeout string
format=duration
to string

Note that each email address must be compliant with the RFC2822 format

transportStrategy string

Will default to SMTPS if left empty

Default value is : SMTPS

Default: "SMTPS"
Values: "SMTP" "SMTPS" "SMTP_TLS" "SMTP_OAUTH2"
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.mail.MailSend object
Examples

Send an email on a failed flow execution

id: unreliable_flow
namespace: company.team

tasks:
  - id: fail
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - exit 1

errors:
  - id: send_email
    type: io.kestra.plugin.notifications.mail.MailSend
    from: [email protected]
    to: [email protected]
    username: "{{ secret('EMAIL_USERNAME') }}"
    password: "{{ secret('EMAIL_PASSWORD') }}"
    host: mail.privateemail.com
    port: 465 # or 587
    subject: "Kestra workflow failed for the flow {{flow.id}} in the namespace {{flow.namespace}}"
    htmlTextContent: "Failure alert for flow {{ flow.namespace }}.{{ flow.id }} with ID {{ execution.id }}"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.mail.MailSend" required
Constant: "io.kestra.plugin.notifications.mail.MailSend"
allowFailure boolean

Default value is : false

Default: false
attachments array

The attachment will be shown in the email client as separate files available for download, or displayed inline if the client supports it (for example, most browsers display PDF's in a popup window)

cc string

Note that each email address must be compliant with the RFC2822 format

description string
disabled boolean

Default value is : false

Default: false
embeddedImages array

The provided images are assumed to be of MIME type png, jpg or whatever the email client supports as valid image that can be embedded in HTML content

from string
host string
htmlTextContent string

Both text and HTML can be provided, which will be offered to the email client as alternative content. Email clients that support it will favor HTML over plain text and ignore the text body completely.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
plainTextContent string

Both text and HTML can be provided, which will be offered to the email client as alternative content. Email clients that support it will favor HTML over plain text and ignore the text body completely.

port integer
sessionTimeout integer

It controls the maximum timeout value when sending emails

Default value is : 10000

Default: 10000
subject string
timeout string
format=duration
to string

Note that each email address must be compliant with the RFC2822 format

transportStrategy string

Will default to SMTPS if left empty

Default value is : SMTPS

Default: "SMTPS"
Values: "SMTP" "SMTPS" "SMTP_TLS" "SMTP_OAUTH2"
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.mail.MailSend-Attachment object
name string required
uri string required
contentType string

The MIME content type of the attachment.

Default value is : application/octet-stream

Default: "application/octet-stream"
io.kestra.plugin.notifications.opsgenie.OpsgenieAlert object

Add this task to a list of errors tasks to implement custom flow-level failure notifications. Check the Opsgenie documentation for more details.

Examples

Send a failed flow alert to Opsgenie

id: unreliable_flow
namespace: company.team

tasks:
  - id: fail
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - exit 1

errors:
  - id: alert_on_failure
    type: io.kestra.plugin.notifications.opsgenie.OpsgenieAlert
    url: "{{ secret('OPSGENIE_REQUEST') }}" # https://api.opsgenie.com/v2/alerts/requests/xxx000xxxxx
    payload: |
      {
        "message":"Kestra Opsgenie alert",
        "alias":"ExecutionError",
        "responders":[
            {"id":"4513b7ea-3b91-438f-b7e4-e3e54af9147c","type":"team"},
            {"id":"bb4d9938-c3c2-455d-aaab-727aa701c0d8","type":"user"},
            {"id":"aee8a0de-c80f-4515-a232-501c0bc9d715","type":"escalation"},
            {"id":"80564037-1984-4f38-b98e-8a1f662df552","type":"schedule"}
         ],
        "visibleTo":[
            {"id":"4513b7ea-3b91-438f-b7e4-e3e54af9147c","type":"team"},
            {"id":"bb4d9938-c3c2-455d-aaab-727aa701c0d8","type":"user"}
         ],
        "tags":["ExecutionFail","Error","Execution"],
        "priority":"P1"
      }
    authorizationToken: sampleAuthorizationToken

Send an Opsgenie alert

id: opsgenie_incoming_webhook
namespace: company.team

tasks:
  - id: send_opsgenie_message
    type: io.kestra.plugin.notifications.opsgenie.OpsgenieAlert
    url: "{{ secret('OPSGENIE_REQUEST') }}"
    payload: |
      {
        "message":"Kestra Opsgenie alert",
        "alias":"Some Execution",
        "responders":[
            {"id":"4513b7ea-3b91-438f-b7e4-e3e54af9147c","type":"team"},
            {"id":"bb4d9938-c3c2-455d-aaab-727aa701c0d8","type":"user"}
         ],
        "visibleTo":[
            {"id":"4513b7ea-3b91-438f-b7e4-e3e54af9147c","type":"team"},
            {"id":"bb4d9938-c3c2-455d-aaab-727aa701c0d8","type":"user"}
         ],
        "tags":["Execution"],
        "priority":"P2"
      }
    authorizationToken: sampleAuthorizationToken

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.opsgenie.OpsgenieAlert" required
Constant: "io.kestra.plugin.notifications.opsgenie.OpsgenieAlert"
url string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
authorizationToken string
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.opsgenie.OpsgenieExecution object

The message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger. Don't use this notification task in errors tasks. Instead, for errors tasks, use the OpsgenieAlert task.

##### Examples

Send notification on a failed flow execution via Opsgenie

id: failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.opsgenie.OpsgenieExecution
    url: "{{ secret('OPSGENIE_REQUEST') }}" # format: 'https://api.opsgenie.com/v2/alerts/requests/xxxxxxyx-yyyx-xyxx-yyxx-yyxyyyyyxxxx'
    message: "Kestra Opsgenie alert"
    alias: ExecutionError
    responders:
      4513b7ea-3b91-438f-b7e4-e3e54af9147c: team
      bb4d9938-c3c2-455d-aaab-727aa701c0d8: user
      aee8a0de-c80f-4515-a232-501c0bc9d715: escalation
      80564037-1984-4f38-b98e-8a1f662df552: schedule
    visibleTo:
      4513b7ea-3b91-438f-b7e4-e3e54af9147c: team
      bb4d9938-c3c2-455d-aaab-727aa701c0d8: user
    priority: P1
    tags:
      - ExecutionError
      - Error
      - Fail
      - Execution
    authorizationToken: sampleAuthorizationToken
    executionId: "{{trigger.executionId}}"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.opsgenie.OpsgenieExecution" required
Constant: "io.kestra.plugin.notifications.opsgenie.OpsgenieExecution"
url string required
minLength=1
alias string
allowFailure boolean

Default value is : false

Default: false
authorizationToken string
customFields object
customMessage string
description string
disabled boolean

Default value is : false

Default: false
executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
message string
payload string
priority string
responders object
tags string[]
templateRenderMap object
timeout string
format=duration
visibleTo object
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.pagerduty.PagerDutyAlert object

Add this task to a list of errors tasks to implement custom flow-level failure notifications. Check the PagerDuty documentation for more details.

##### Examples

Send a PagerDuty alert on a failed flow execution

id: unreliable_flow
namespace: company.team

tasks:
  - id: fail
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - exit 1

errors:
  - id: alert_on_failure
    type: io.kestra.plugin.notifications.pagerduty.PagerDutyAlert
    url: "{{ secret('PAGERDUTY_EVENT') }}" # https://events.pagerduty.com/v2/enqueue
    payload: |
      {
        "dedup_key": "samplekey",
        "routing_key": "samplekey",
        "event_action": "trigger",
        "payload" : {
            "summary": "PagerDuty alert"
        }
      }

Send a PagerDuty alert via incoming webhook

id: discord_incoming_webhook
namespace: company.team

tasks:
  - id: send_pagerduty_alert
    type: io.kestra.plugin.notifications.pagerduty.PagerDutyAlert
    url: "{{ secret('PAGERDUTY_EVENT') }}"
    payload: |
      {
        "dedup_key": "samplekey",
        "routing_key": "samplekey",
        "event_action": "acknowledge"
      }

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.pagerduty.PagerDutyAlert" required
Constant: "io.kestra.plugin.notifications.pagerduty.PagerDutyAlert"
url string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.pagerduty.PagerDutyExecution object

The message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger. Don't use this notification task in errors tasks. Instead, for errors tasks, use the PagerDutyAlert task.

##### Examples

Send a PagerDuty notification on a failed flow execution

id: failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.pagerduty.PagerDutyExecution
    url: "{{ secret('PAGERDUTY_EVENT') }}" # format: https://events.pagerduty.com/v2/enqueue
    payloadSummary: "PagerDuty Alert"
    deduplicationKey: "dedupkey"
    routingKey: "routingkey"
    eventAction: "acknowledge"
    executionId: "{{trigger.executionId}}"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.pagerduty.PagerDutyExecution" required
Constant: "io.kestra.plugin.notifications.pagerduty.PagerDutyExecution"
url string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
customFields object
customMessage string
deduplicationKey string
description string
disabled boolean

Default value is : false

Default: false
eventAction string
executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
payloadSummary string
maxLength=1024
routingKey string
templateRenderMap object
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.sendgrid.SendGridMailExecution object

The message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger, as shown in this example. Don't use this notification task in errors tasks. Instead, for errors tasks, use the SendGridMailSend task.

##### Examples

Send a SendGrid email notification on a failed flow execution

id: failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.sendgrid.SendGridMailExecution
    to:
      - [email protected]
    from: [email protected]
    subject: "The workflow execution {{trigger.executionId}} failed for the flow {{trigger.flowId}} in the namespace {{trigger.namespace}}"
    sendgridApiKey: "{{ secret('SENDGRID_API_KEY') }}"
    executionId: "{{ trigger.executionId }}"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true

from string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
sendgridApiKey string required
minLength=1
to string[] required

Note that each email address must be compliant with the RFC2822 format

minItems=1
type const: "io.kestra.plugin.notifications.sendgrid.SendGridMailExecution" required
Constant: "io.kestra.plugin.notifications.sendgrid.SendGridMailExecution"
allowFailure boolean

Default value is : false

Default: false
attachments array

The attachment will be shown in the email client as separate files available for download, or displayed inline if the client supports it (for example, most browsers display PDF's in a popup window)

cc string[]

Note that each email address must be compliant with the RFC2822 format

customFields object
customMessage string
description string
disabled boolean

Default value is : false

Default: false
embeddedImages array

The provided images are assumed to be of MIME type png, jpg or whatever the email client supports as valid image that can be embedded in HTML content

executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
htmlContent string

Both text and HTML can be provided, which will be offered to the email client as alternative content. Email clients that support it will favor HTML over plain text and ignore the text body completely

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
subject string
templateRenderMap object
textContent string

Both text and HTML can be provided, which will be offered to the email client as alternative content. Email clients that support it will favor HTML over plain text and ignore the text body completely

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.sendgrid.SendGridMailSend object
Examples

Send an email on a failed flow execution

id: unreliable_flow
namespace: company.team

tasks:
  - id: fail
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - exit 1

errors:
  - id: send_email
    type: io.kestra.plugin.notifications.sendgrid.SendGridMailSend
    from: [email protected]
    to:
      - [email protected]
    sendgridApiKey: "{{ secret('SENDGRID_API_KEY') }}"
    subject: "Kestra workflow failed for the flow {{flow.id}} in the namespace {{flow.namespace}}"
    htmlContent: "Failure alert for flow {{ flow.namespace }}.{{ flow.id }} with ID {{ execution.id }}"

from string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
sendgridApiKey string required
minLength=1
to string[] required

Note that each email address must be compliant with the RFC2822 format

minItems=1
type const: "io.kestra.plugin.notifications.sendgrid.SendGridMailSend" required
Constant: "io.kestra.plugin.notifications.sendgrid.SendGridMailSend"
allowFailure boolean

Default value is : false

Default: false
attachments array

The attachment will be shown in the email client as separate files available for download, or displayed inline if the client supports it (for example, most browsers display PDF's in a popup window)

cc string[]

Note that each email address must be compliant with the RFC2822 format

description string
disabled boolean

Default value is : false

Default: false
embeddedImages array

The provided images are assumed to be of MIME type png, jpg or whatever the email client supports as valid image that can be embedded in HTML content

htmlContent string

Both text and HTML can be provided, which will be offered to the email client as alternative content. Email clients that support it will favor HTML over plain text and ignore the text body completely

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
subject string
textContent string

Both text and HTML can be provided, which will be offered to the email client as alternative content. Email clients that support it will favor HTML over plain text and ignore the text body completely

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.sendgrid.SendGridMailSend-Attachment object
name string required
uri string required
contentType string

The MIME content type of the attachment

Default value is : application/octet-stream

Default: "application/octet-stream"
io.kestra.plugin.notifications.sentry.SentryAlert object

Add this task to a list of errors tasks to implement custom flow-level failure notifications.

The only required input is a DSN string value, which you can find when you go to your Sentry project settings and go to the section Client Keys (DSN). You can find more detailed description of how to find your DSN in the following Sentry documentation.

You can customize the alert payload, which is a JSON object, or you can skip it and use the default payload created by kestra. For more information about the payload, check the Sentry Event Payloads documentation.

The event_id is an optional payload attribute that you can use to override the default event ID. If you don't specify it (recommended), kestra will generate a random UUID. You can use this attribute to group events together, but note that this must be a UUID type. For more information, check the Sentry documentation.

##### Examples

Send a Sentry alert on a failed flow execution

id: unreliable_flow
namespace: company.team

tasks:
  - id: fail
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - exit 1

errors:
  - id: alert_on_failure
    type: io.kestra.plugin.notifications.sentry.SentryAlert
    dsn: "{{ secret('SENTRY_DSN') }}" # format: https://[email protected]/xxx
    endpointType: ENVELOPE

Send a custom Sentry alert

id: sentry_alert
namespace: company.team

tasks:
  - id: send_sentry_message
    type: io.kestra.plugin.notifications.sentry.SentryAlert
    dsn: "{{ secret('SENTRY_DSN') }}"
    endpointType: "ENVELOPE"
    payload: |
      {
          "timestamp": "{{ execution.startDate }}",
          "platform": "java",
          "level": "error",
          "transaction": "/execution/id/{{ execution.id }}",
          "server_name": "localhost:8080",
          "message": {
            "message": "Execution {{ execution.id }} failed"
          },
          "extra": {
            "Namespace": "{{ flow.namespace }}",
            "Flow ID": "{{ flow.id }}",
            "Execution ID": "{{ execution.id }}",
            "Link": "http://localhost:8080/ui/executions/{{flow.namespace}}/{{flow.id}}/{{execution.id}}"
          }
      }
dsn string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.sentry.SentryAlert" required
Constant: "io.kestra.plugin.notifications.sentry.SentryAlert"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
endpointType string

Default value is : ENVELOPE

Default: "ENVELOPE"
Values: "ENVELOPE" "STORE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.sentry.SentryExecution object

The alert message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger. Don't use this notification task in errors tasks. Instead, for errors tasks, use the SentryAlert task.

The only required input is a DSN string value, which you can find when you go to your Sentry project settings and go to the section Client Keys (DSN). For more detailed description of how to find your DSN, visit the following Sentry documentation.

You can customize the alert payload, which is a JSON object. For more information about the payload, check the Sentry Event Payloads documentation.

The level parameter is the severity of the issue. The task documentation lists all available options including DEBUG, INFO, WARNING, ERROR, FATAL. The default value is ERROR.

##### Examples

This monitoring flow is triggered anytime a flow fails in the prod namespace. It then sends a Sentry alert with the execution information. You can fully customize the trigger conditions.

id: failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.sentry.SentryExecution
    transaction: "/execution/id/{{ trigger.executionId }}"
    dsn: "{{ secret('SENTRY_DSN') }}"
    level: ERROR
    executionId: "{{ trigger.executionId }}"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true
dsn string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.sentry.SentryExecution" required
Constant: "io.kestra.plugin.notifications.sentry.SentryExecution"
allowFailure boolean

Default value is : false

Default: false
customFields object
customMessage string
description string
disabled boolean

Default value is : false

Default: false
endpointType string

Default value is : ENVELOPE

Default: "ENVELOPE"
Values: "ENVELOPE" "STORE"
errors object
eventId string

Default value is : a generated unique identifier

Default: "a generated unique identifier"
pattern=[0-9a-f]{8}[0-9a-f]{4}[0-9a-f]{4}[0-9a-f]{4}[0-9a-f]{12}
executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
extra object
level string

Acceptable values are: fatal, error, warning, info, debug.

Default value is : ERROR

Default: "ERROR"
Values: "FATAL" "ERROR" "WARNING" "INFO" "DEBUG"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
platform string

Default value is : JAVA

Default: "JAVA"
Values: "AS3" "C" "CFML" "COCOA" "CSHARP" "ELIXIR" "HASKELL" "GO" "GROOVY" "JAVA" "JAVASCRIPT" "NATIVE" "NODE" "OBJC" "OTHER" "PERL" "PHP" "PYTHON" "RUBY"
serverName string
templateRenderMap object
timeout string
format=duration
transaction string

For example, in a web app, this might be the route name

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.slack.SlackExecution object

The message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger. Don't use this notification task in errors tasks. Instead, for errors tasks, use the SlackIncomingWebhook task.

##### Examples

Send a Slack notification on a failed flow execution

id: failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.slack.SlackExecution
    url: "{{ secret('SLACK_WEBHOOK') }}" # format: https://hooks.slack.com/services/xzy/xyz/xyz
    channel: "#general"
    executionId: "{{trigger.executionId}}"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true

Send a Rocket.Chat notification on a failed flow execution

id: failure_alert
namespace: debug

tasks:
  - id: send_alert_to_rocket_chat
    type: io.kestra.plugin.notifications.slack.SlackExecution
    url: "{{ secret('ROCKET_CHAT_WEBHOOK') }}"
    channel: "#errors"
    executionId: "{{ trigger.executionId }}"
    username: "Kestra TEST"
    iconUrl: "https://avatars.githubusercontent.com/u/59033362?s=48"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: debug
        prefix: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.slack.SlackExecution" required
Constant: "io.kestra.plugin.notifications.slack.SlackExecution"
url string required

Check the Create an Incoming Webhook documentation for more details.

minLength=1
allowFailure boolean

Default value is : false

Default: false
channel string
customFields object
customMessage string
description string
disabled boolean

Default value is : false

Default: false
executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
iconEmoji string
iconUrl string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
templateRenderMap object
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.slack.SlackIncomingWebhook object

Add this task to send direct Slack notifications. Check the Slack documentation for more details.

##### Examples

Send a Slack notification on a failed flow execution

id: unreliable_flow
namespace: company.team

tasks:
  - id: fail
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - exit 1

errors:
  - id: alert_on_failure
    type: io.kestra.plugin.notifications.slack.SlackIncomingWebhook
    url: "{{ secret('SLACK_WEBHOOK') }}" # https://hooks.slack.com/services/xzy/xyz/xyz
    payload: |
      {
        "text": "Failure alert for flow {{ flow.namespace }}.{{ flow.id }} with ID {{ execution.id }}"
      }

Send a Slack message via incoming webhook with a text argument

id: slack_incoming_webhook
namespace: company.team

tasks:
  - id: send_slack_message
    type: io.kestra.plugin.notifications.slack.SlackIncomingWebhook
    url: "{{ secret('SLACK_WEBHOOK') }}"
    payload: |
      {
        "text": "Hello from the workflow {{ flow.id }}"
      }

Send a Slack message via incoming webhook with a blocks argument, read more on blocks here

id: slack_incoming_webhook
namespace: company.team

tasks:
  - id: send_slack_message
    type: io.kestra.plugin.notifications.slack.SlackIncomingWebhook
    url: "{{ secret('SLACK_WEBHOOK') }}"
    payload: |
      {
        "blocks": [
    		{
    			"type": "section",
    			"text": {
    				"type": "mrkdwn",
    				"text": "Hello from the workflow *{{ flow.id }}*"
    			}
    		}
    	]
      }

Send a Rocket.Chat message via incoming webhook

id: rocket_chat_notification
namespace: company.team
tasks:
  - id: send_rocket_chat_message
    type: io.kestra.plugin.notifications.slack.SlackIncomingWebhook
    url: "{{ secret('ROCKET_CHAT_WEBHOOK') }}"
    payload: |
      {
        "alias": "Kestra TEST",
        "avatar": "https://avatars.githubusercontent.com/u/59033362?s=48",
        "emoji": ":smirk:",
        "roomId": "#my-channel",
        "text": "Sample",
        "tmshow": true,
        "attachments": [
          {
            "collapsed": false,
            "color": "#ff0000",
            "text": "Yay!",
            "title": "Attachment Example",
            "title_link": "https://rocket.chat",
            "title_link_download": false,
            "fields": [
              {
                "short": false,
                "title": "Test title",
                "value": "Test value"
              },
              {
                "short": true,
                "title": "Test title",
                "value": "Test value"
              }
            ]
          }
        ]
      }

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.slack.SlackIncomingWebhook" required
Constant: "io.kestra.plugin.notifications.slack.SlackIncomingWebhook"
url string required

Check the Create an Incoming Webhook documentation for more details.

minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.teams.TeamsExecution object

The message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger. Don't use this notification task in errors tasks. Instead, for errors tasks, use the TeamsIncomingWebhook task.

##### Examples

Send a Microsoft Teams notification on a failed flow execution

id: failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.teams.TeamsExecution
    url: "{{ secret('TEAMS_WEBHOOK') }}" # format: https://microsoft.webhook.office.com/webhook/xyz
    activityTitle: "Kestra Teams notification"
    executionId: "{{ trigger.executionId }}"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.teams.TeamsExecution" required
Constant: "io.kestra.plugin.notifications.teams.TeamsExecution"
url string required
minLength=1
activitySubtitle string
activityTitle string
allowFailure boolean

Default value is : false

Default: false
customFields object
customMessage string
description string
disabled boolean

Default value is : false

Default: false
executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
templateRenderMap object
themeColor string

Default value is : 0076D7

Default: "0076D7"
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.teams.TeamsIncomingWebhook object

Add this task to a list of errors tasks to implement custom flow-level failure notifications. Check the Microsoft Teams documentation for more details.

##### Examples

Send a Microsoft Teams notification on a failed flow execution

id: unreliable_flow
namespace: company.team

tasks:
  - id: fail
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - exit 1

errors:
  - id: alert_on_failure
    type: io.kestra.plugin.notifications.teams.TeamsIncomingWebhook
    url: "{{ secret('TEAMS_WEBHOOK') }}" # format: https://microsoft.webhook.office.com/webhook/xyz
    payload: |
      {
        "@type": "MessageCard",
        "@context": "http://schema.org/extensions",
        "themeColor": "0076D7",
        "summary": "Failure alert for flow {{ flow.namespace }}.{{ flow.id }} with ID {{ execution.id }}",
        "sections": [{
        "activityTitle": "Kestra Workflow Notification",
        "activitySubtitle": "Workflow Execution Finished With Errors",
        "markdown": true
        }],
        "potentialAction": [
          {
            "@type": "OpenUri",
            "name": "Kestra Workflow",
            "targets": [
            {
            "os": "default",
            "uri": "{{ vars.systemUrl }}"
            }
            ]
          }
        ]
      }

Send a Microsoft Teams notification message

url: "https://microsoft.webhook.office.com/webhookb2/XXXXXXXXXX"
payload: |
  {
    "@type": "MessageCard",
    "@context": "http://schema.org/extensions",
    "themeColor": "0076D7",
    "summary": "Notification message",
    "sections": [{
      "activityTitle": "Rolling Workflow started",
      "activitySubtitle": "Workflow Notification",
      "markdown": true
    }],
    "potentialAction": [
      {
        "@type": "OpenUri",
        "name": "Rolling Workflow",
        "targets": [
          {
           "os": "default",
           "uri": "{{ vars.systemUrl }}"
          }
        ]
      }
    ]
  }
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.teams.TeamsIncomingWebhook" required
Constant: "io.kestra.plugin.notifications.teams.TeamsIncomingWebhook"
url string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.telegram.TelegramExecution object

The message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger. Don't use this notification task in errors tasks. Instead, for errors tasks, use the TelegramSend task.

##### Examples

Send a Telegram notification on a failed flow execution

id: failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.telegram.TelegramExecution
    token: "{{ secret('TELEGRAM_TOKEN') }}" # format: 6090305634:xyz
    channel: "2072728690"
    executionId: "{{ trigger.executionId }}"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true

channel string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
token string required
type const: "io.kestra.plugin.notifications.telegram.TelegramExecution" required
Constant: "io.kestra.plugin.notifications.telegram.TelegramExecution"
allowFailure boolean

Default value is : false

Default: false
customFields object
customMessage string
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string
executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
templateRenderMap object
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.telegram.TelegramSend object
channel string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
token string required
type const: "io.kestra.plugin.notifications.telegram.TelegramSend" required
Constant: "io.kestra.plugin.notifications.telegram.TelegramSend"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
endpointOverride string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.twilio.TwilioAlert object

Add this task to a list of errors tasks to implement custom flow-level failure notifications. Check the Twilio documentation for more details.##### Examples

Send a Twilio notification on a failed flow execution

id: unreliable_flow
namespace: company.team

tasks:
  - id: fail
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - exit 1

errors:
  - id: alert_on_failure
    type: io.kestra.plugin.notifications.twilio.TwilioAlert
    url: "{{ secret('TWILIO_ALERT') }}" # https://notify.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Notifications
    payload: |
      {
        "identity": "0000001"
      }

Send a Twilio message via incoming notification API

id: twilio_alert
namespace: company.team

tasks:
  - id: send_twilio_message
    type: io.kestra.plugin.notifications.twilio.TwilioAlert
    url: "{{ secret('TWILIO_ALERT') }}"
    payload: |
      {
        "identity": "0000001"
      }

accountSID string required
minLength=1
authToken string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.twilio.TwilioAlert" required
Constant: "io.kestra.plugin.notifications.twilio.TwilioAlert"
url string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.twilio.TwilioExecution object

The message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger. Don't use this notification task in errors tasks. Instead, for errors tasks, use the TwilioAlert task.##### Examples

Send a Twilio notification on a failed flow execution

id: failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.twilio.TwilioExecution
    url: "{{ secret('TWILIO_ALERT') }}" # format: https://notify.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Notifications
    identity: 0000001
    executionId: "{{trigger.executionId}}"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true

accountSID string required
minLength=1
authToken string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.twilio.TwilioExecution" required
Constant: "io.kestra.plugin.notifications.twilio.TwilioExecution"
url string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
body string
customFields object
customMessage string
description string
disabled boolean

Default value is : false

Default: false
executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
identity string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
tag string
templateRenderMap object
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.whatsapp.WhatsAppExecution object

The message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger. Don't use this notification task in errors tasks. Instead, for errors tasks, use the WhatsAppIncomingWebhook task.##### Examples

Send a WhatsApp notification on a failed flow execution

id: failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.whatsapp.WhatsAppExecution
    url: "{{ secret('WHATSAPP_WEBHOOK') }}" # format: https://webhook.your-domain
    profileName: "MyProfile"
    from: 380999999999
    whatsAppIds:
        - "some waId"
        - "waId No2"
    executionId: "{{trigger.executionId}}"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.whatsapp.WhatsAppExecution" required
Constant: "io.kestra.plugin.notifications.whatsapp.WhatsAppExecution"
url string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
customFields object
customMessage string
description string
disabled boolean

Default value is : false

Default: false
executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
from string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
messageId string
payload string
profileName string
recipientId string
templateRenderMap object
textBody string
timeout string
format=duration
whatsAppIds string[]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.whatsapp.WhatsAppIncomingWebhook object

Add this task to a list of errors tasks to implement custom flow-level failure notifications. Check the WhatsApp documentation for more details.##### Examples

Send a WhatsApp notification on a failed flow execution

id: unreliable_flow
namespace: company.team

tasks:
  - id: fail
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - exit 1

errors:
  - id: alert_on_failure
    type: io.kestra.plugin.notifications.whatsapp.WhatsAppIncomingWebhook
    url: "{{ secret('WHATSAPP_WEBHOOK') }}" # https://webhook.your-domain
    payload: |
      {
        "profileName": "MyName",
        "whatsAppIds": ["IdNo1", "IdNo2"],
        "from": 380999999999
      }

Send a WhatsApp message via incoming webhook

id: whatsapp_incoming_webhook
namespace: company.team

tasks:
  - id: send_whatsapp_message
    type: io.kestra.plugin.notifications.whatsapp.WhatsAppIncomingWebhook
    url: "{{ secret('WHATSAPP_WEBHOOK') }}"
    payload: |
      {
        "profileName": "MyName",
        "whatsAppIds": ["IdNo1", "IdNo2"],
        "from": 380999999999,
        "messageId": "wamIdNo1"
      }

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.whatsapp.WhatsAppIncomingWebhook" required
Constant: "io.kestra.plugin.notifications.whatsapp.WhatsAppIncomingWebhook"
url string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.zenduty.ZendutyAlert object

Add this task to a list of errors tasks to implement custom flow-level failure notifications. Check the Zenduty integration documentation and the Zenduty Events API specification for more details.##### Examples

Send a Zenduty alert on a failed flow execution. Make sure that the payload follows the Zenduty Events API specification, including the message and alert_type payload properties, which are required.

id: unreliable_flow
namespace: company.team

tasks:
  - id: fail
    type: io.kestra.plugin.scripts.shell.Commands
    commands:
      - exit 1

errors:
  - id: alert_on_failure
    type: io.kestra.plugin.notifications.zenduty.ZendutyAlert
    url: "https://www.zenduty.com/api/events/{{ secret('ZENDUTY_INTEGRATION_KEY') }}/"
    payload: |
      {
        "alert_type": "info",
        "message": "This is info alert",
        "summary": "This is the incident summary",
        "suppressed": false,
        "entity_id": 12345,
        "payload": {
            "status": "ACME Payments are failing",
            "severity": "1",
            "project": "kubeprod"
          },
        "urls": [
          {
            "link_url": "https://www.example.com/alerts/12345/",
            "link_text": "Alert URL"
          }
        ]
      }

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.zenduty.ZendutyAlert" required
Constant: "io.kestra.plugin.notifications.zenduty.ZendutyAlert"
url string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.zenduty.ZendutyExecution object

The message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger. Don't use this notification task in errors tasks. Instead, for errors tasks, use the ZendutyAlert task.##### Examples

Send a Zenduty notification on a failed flow execution

id: zenduty_failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.zenduty.ZendutyExecution
    url: "https://www.zenduty.com/api/events/{{ secret('ZENDUTY_INTEGRATION_KEY') }}/"
    executionId: "{{ trigger.executionId }}"
    message: Kestra workflow execution {{ trigger.executionId }} of a flow {{ trigger.flowId }} in the namespace {{ trigger.namespace }} changed status to {{ trigger.state }}

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.zenduty.ZendutyExecution" required
Constant: "io.kestra.plugin.notifications.zenduty.ZendutyExecution"
url string required
minLength=1
alertType string
Values: "CRITICAL" "ACKNOWLEDGED" "RESOLVED" "ERROR" "WARNING" "INFO"
allowFailure boolean

Default value is : false

Default: false
customFields object
customMessage string
description string
disabled boolean

Default value is : false

Default: false
entityId string
executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
message string
payload string
summary string
templateRenderMap object
timeout string
format=duration
urls string[]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.zulip.ZulipExecution object

The message will include a link to the execution page in the UI along with the execution ID, namespace, flow name, the start date, duration and the final status of the execution, and (if failed) the task that led to a failure.

Use this notification task only in a flow that has a Flow trigger. Don't use this notification task in errors tasks. Instead, for errors tasks, use the ZulipIncomingWebhook task.##### Examples

Send a Zulip notification on a failed flow execution

id: failure_alert
namespace: company.team

tasks:
  - id: send_alert
    type: io.kestra.plugin.notifications.zulip.ZulipExecution
    url: "{{ secret('ZULIP_WEBHOOK') }}" # format: https://yourZulipDomain.zulipchat.com/api/v1/external/INTEGRATION_NAME?api_key=API_KEY
    channel: "#general"
    executionId: "{{trigger.executionId}}"

triggers:
  - id: failed_prod_workflows
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: prod
        prefix: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.zulip.ZulipExecution" required
Constant: "io.kestra.plugin.notifications.zulip.ZulipExecution"
url string required

Check the Incoming Webhook Integrations documentation for more details.

minLength=1
allowFailure boolean

Default value is : false

Default: false
channel string
customFields object
customMessage string
description string
disabled boolean

Default value is : false

Default: false
executionId string

Default is the current execution, change it to {{ trigger.executionId }} if you use this task with a Flow trigger to use the original execution.

Default value is : "{{ execution.id }}"

Default: "{{ execution.id }}"
iconEmoji string
iconUrl string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
templateRenderMap object
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.notifications.zulip.ZulipIncomingWebhook object

Add this task to send direct Zulip notifications. Check the Zulip documentation for more details.##### Examples

Send a Zulip notification on a failed flow execution

id: unreliable_flow
namespace: company.team

tasks:
  - id: fail
    type: io.kestra.plugin.scripts.shell.Commands
    runner: PROCESS
    commands:
      - exit 1

errors:
  - id: alert_on_failure
    type: io.kestra.plugin.notifications.zulip.ZulipIncomingWebhook
    url: "{{ secret('ZULIP_WEBHOOK') }}" # https://yourZulipDomain.zulipchat.com/api/v1/external/INTEGRATION_NAME?api_key=API_KEY
    payload: |
      {
        "text": "Failure alert for flow {{ flow.namespace }}.{{ flow.id }} with ID {{ execution.id }}"
      }

Send a Zulip message via incoming webhook with a text argument

id: zulip_incoming_webhook
namespace: company.team

tasks:
  - id: send_zulip_message
    type: io.kestra.plugin.notifications.zulip.ZulipIncomingWebhook
    url: "{{ secret('ZULIP_WEBHOOK') }}" # https://yourZulipDomain.zulipchat.com/api/v1/external/INTEGRATION_NAME?api_key=API_KEY
    payload: |
      {
        "text": "Hello from the workflow {{ flow.id }}"
      }

Send a Zulip message via incoming webhook with a blocks argument, read more on blocks here

id: zulip_incoming_webhook
namespace: company.team

tasks:
  - id: send_zulip_message
    type: io.kestra.plugin.notifications.zulip.ZulipIncomingWebhook
    url: "{{ secret('ZULIP_WEBHOOK') }}" # format: https://yourZulipDomain.zulipchat.com/api/v1/external/INTEGRATION_NAME?api_key=API_KEY
    payload: |
      {
        "blocks": [
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": "Hello from the workflow *{{ flow.id }}*"
                }
            }
        ]
      }

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.notifications.zulip.ZulipIncomingWebhook" required
Constant: "io.kestra.plugin.notifications.zulip.ZulipIncomingWebhook"
url string required

Check the Incoming Webhook Integrations documentation for more details.

minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
payload string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.openai.ChatCompletion object

For more information, refer to the Chat Completions API docs.##### Examples

Based on a prompt input, generate a completion response and pass it to a downstream task.

id: openai
namespace: company.team

inputs:
  - id: prompt
    type: STRING
    defaults: What is data orchestration?

tasks:
  - id: completion
    type: io.kestra.plugin.openai.ChatCompletion
    apiKey: "yourOpenAIapiKey"
    model: gpt-4o
    prompt: "{{ inputs.prompt }}"

  - id: response
    type: io.kestra.plugin.core.debug.Return
    format: "{{ outputs.completion.choices[0].message.content }}"

Based on a prompt input, ask OpenAI to call a function that determines whether you need to respond to a customer's review immediately or wait until later, and then comes up with a suggested response.

id: openai
namespace: company.team

inputs:
  - id: prompt
    type: STRING
    defaults: I love your product and would purchase it again!

tasks:
  - id: prioritize_response
    type: io.kestra.plugin.openai.ChatCompletion
    apiKey: "yourOpenAIapiKey"
    model: gpt-4o
    messages:
      - role: user
        content: "{{ inputs.prompt }}"
    functions:
      - name: respond_to_review
        description: Given the customer product review provided as input, determines how urgently a reply is required and then provides suggested response text.
        parameters:
          - name: response_urgency
            type: string
            description: How urgently this customer review needs a reply. Bad reviews
                         must be addressed immediately before anyone sees them. Good reviews can
                         wait until later.
            required: true
            enumValues:
              - reply_immediately
              - reply_later
          - name: response_text
            type: string
            description: The text to post online in response to this review.
            required: true

  - id: response_urgency
    type: io.kestra.plugin.core.debug.Return
    format: "{{ outputs.prioritize_response.choices[0].message.function_call.arguments.response_urgency }}"

  - id: response_text
    type: io.kestra.plugin.core.debug.Return
    format: "{{ outputs.prioritize_response.choices[0].message.function_call.arguments.response_text }}"

apiKey string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
model string required

See the OpenAI model's documentation page for more details.

type const: "io.kestra.plugin.openai.ChatCompletion" required
Constant: "io.kestra.plugin.openai.ChatCompletion"
allowFailure boolean

Default value is : false

Default: false
clientTimeout integer

Default value is : 10

Default: 10
description string
disabled boolean

Default value is : false

Default: false
frequencyPenalty number
functionCall string

Enter a specific function name, or 'auto' to let the model decide. The default is auto.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
logitBias object
maxTokens integer
messages array

Required if prompt is not set.

n integer
presencePenalty number
prompt string

If not provided, make sure to set the messages property.

stop string[]
temperature number
timeout string
format=duration
topP number
user string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.openai.ChatCompletion-PluginChatFunction object
io.kestra.plugin.openai.ChatCompletion-PluginChatFunctionParameter object
description string required

Provide as many details as possible to ensure the model returns an accurate parameter.

name string required
type string required

Valid types are string, number, integer, boolean, array, object

enumValues string[]

Optional, but useful for classification problems.

required boolean

Defaults to false.

Default value is : false

Default: false
io.kestra.plugin.openai.CreateImage object

For more information, refer to the OpenAI Image Generation API docs.##### Examples

id: openai
namespace: company.team

tasks:
  - id: create_image
    type: io.kestra.plugin.openai.CreateImage
    prompt: A funny cat in a black suit
    apiKey: <your-api-key>
    download: true
    n: 5

apiKey string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
prompt string required
type const: "io.kestra.plugin.openai.CreateImage" required
Constant: "io.kestra.plugin.openai.CreateImage"
allowFailure boolean

Default value is : false

Default: false
clientTimeout integer

Default value is : 10

Default: 10
description string
disabled boolean

Default value is : false

Default: false
download boolean

If enabled, the generated image will be downloaded inside Kestra's internal storage. Otherwise, the URL of the generated image will be available as task output.

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
n integer
size string

Default value is : LARGE

Default: "LARGE"
Values: "SMALL" "MEDIUM" "LARGE"
timeout string
format=duration
user string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.powerbi.RefreshGroupDataset object

An asynchronous refresh would be triggered.

clientId string required
minLength=1
clientSecret string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
tenantId string required
minLength=1
type const: "io.kestra.plugin.powerbi.RefreshGroupDataset" required
Constant: "io.kestra.plugin.powerbi.RefreshGroupDataset"
allowFailure boolean

Default value is : false

Default: false
datasetId string
description string
disabled boolean

Default value is : false

Default: false
groupId string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pollDuration string

Default value is : 5.000000000

Default: 5.0
format=duration
timeout string
format=duration
wait boolean

Default value is : false

Default: false
waitDuration string

Default value is : 600.000000000

Default: 600.0
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.pulsar.AbstractPulsarConnection-TlsOptions object
ca string

Must be a base64-encoded pem file.

cert string

Must be a base64-encoded pem file.

key string

Must be a base64-encoded pem file.

io.kestra.plugin.pulsar.Consume object
Examples
id: pulsar_consume
namespace: company.team

tasks:
  - id: consume
    type: io.kestra.plugin.pulsar.Consume
    uri: pulsar://localhost:26650
    topic: test_kestra
    deserializer: JSON
    subscriptionName: kestra_flow

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
subscriptionName string required

Using subscription name, we will fetch only records that haven't been consumed yet.

topic required

Can be a string or a list of strings to consume from multiple topics.

type const: "io.kestra.plugin.pulsar.Consume" required
Constant: "io.kestra.plugin.pulsar.Consume"
uri string required

You need to specify a Pulsar protocol URL.

  • Example of localhost: pulsar://localhost:6650
  • If you have multiple brokers: pulsar://localhost:6650,localhost:6651,localhost:6652
  • If you use TLS authentication: pulsar+ssl://pulsar.us-west.example.com:6651
allowFailure boolean

Default value is : false

Default: false
authenticationToken string

Authentication token that can be required by some providers such as Clever Cloud.

consumerName string
consumerProperties Record<string, string>
description string
deserializer
All of: Serializer / Deserializer used for the value. string, Deserializer used for the value.
disabled boolean

Default value is : false

Default: false
encryptionKey string
initialPosition string

Default value is : Earliest

Default: "Earliest"
Values: "Latest" "Earliest"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

pollDuration string

If no records are available, the maximum time to wait for a new record.

Default value is : 2.000000000

Default: 2.0
format=duration
schemaString string

Required for connecting with topics with a defined schema and strict schema checking

schemaType string

Can be one of NONE, AVRO or JSON. None means there will be no schema enforced.

Default value is : NONE

Default: "NONE"
Values: "NONE" "AVRO" "JSON"
subscriptionType string

Default value is : Exclusive

Default: "Exclusive"
Values: "Exclusive" "Shared" "Failover" "Key_Shared"
timeout string
format=duration
tlsOptions
All of: io.kestra.plugin.pulsar.AbstractPulsarConnection-TlsOptions object, TLS authentication options.
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.pulsar.Produce object
Examples

Read a CSV file, transform it to the right format, and publish it to Pulsar topic.

    id: produce
    namespace: company.team

    inputs:
      - type: FILE
        id: file

    tasks:
      - id: csv_reader
        type: io.kestra.plugin.serdes.csv.CsvToIon
        from: "{{ inputs.file }}"

      - id: file_transform
        type: io.kestra.plugin.scripts.nashorn.FileTransform
        from: "{{ outputs.csv_reader.uri }}"
        script: |
          var result = {
            "key": row.id,
            "value": {
              "username": row.username,
              "tweet": row.tweet
            },
            "eventTime": row.timestamp,
            "properties": {
              "key": "value"
            }
          };
          row = result

      - id: produce
        type: io.kestra.plugin.pulsar.Produce
        from: "{{ outputs.file_transform.uri }}"
        uri: pulsar://localhost:26650
        serializer: JSON
        topic: test_kestra

from required

Can be a Kestra internal storage URI, a map or a list in the following format: key, value, eventTime, properties, deliverAt, deliverAfter and sequenceId.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
topic string required
type const: "io.kestra.plugin.pulsar.Produce" required
Constant: "io.kestra.plugin.pulsar.Produce"
uri string required

You need to specify a Pulsar protocol URL.

  • Example of localhost: pulsar://localhost:6650
  • If you have multiple brokers: pulsar://localhost:6650,localhost:6651,localhost:6652
  • If you use TLS authentication: pulsar+ssl://pulsar.us-west.example.com:6651
accessMode string

Possible values are:

  • Shared: By default, multiple producers can publish to a topic.
  • Exclusive: Require exclusive access for producer. Fail immediately if there's already a producer connected.
  • WaitForExclusive: Producer creation is pending until it can acquire exclusive access.
Values: "Shared" "Exclusive" "ExclusiveWithFencing" "WaitForExclusive"
allowFailure boolean

Default value is : false

Default: false
authenticationToken string

Authentication token that can be required by some providers such as Clever Cloud.

compressionType string

By default, message payloads are not compressed. Supported compression types are:

  • NONE: No compression (Default).
  • LZ4: Compress with LZ4 algorithm. Faster but lower compression than ZLib.
  • ZLIB: Standard ZLib compression.
  • ZSTD: Compress with Zstandard codec. Since Pulsar 2.3.
  • SNAPPY: Compress with Snappy codec. Since Pulsar 2.4.
Values: "NONE" "LZ4" "ZLIB" "ZSTD" "SNAPPY"
description string
disabled boolean

Default value is : false

Default: false
encryptionKey string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
producerName string
producerProperties Record<string, string>
schemaString string

Required for connecting with topics with a defined schema and strict schema checking

schemaType string

Can be one of NONE, AVRO or JSON. None means there will be no schema enforced.

Default value is : NONE

Default: "NONE"
Values: "NONE" "AVRO" "JSON"
serializer
All of: Serializer / Deserializer used for the value. string, Serializer used for the value.
timeout string
format=duration
tlsOptions
All of: io.kestra.plugin.pulsar.AbstractPulsarConnection-TlsOptions object, TLS authentication options.
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.pulsar.Reader object
Examples
id: pulsar_reader
namespace: company.team

tasks:
  - id: reader
    type: io.kestra.plugin.pulsar.Reader
    uri: pulsar://localhost:26650
    topic: test_kestra
    deserializer: JSON

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
topic required

Can be a string or a list of strings to consume from multiple topics.

type const: "io.kestra.plugin.pulsar.Reader" required
Constant: "io.kestra.plugin.pulsar.Reader"
uri string required

You need to specify a Pulsar protocol URL.

  • Example of localhost: pulsar://localhost:6650
  • If you have multiple brokers: pulsar://localhost:6650,localhost:6651,localhost:6652
  • If you use TLS authentication: pulsar+ssl://pulsar.us-west.example.com:6651
allowFailure boolean

Default value is : false

Default: false
authenticationToken string

Authentication token that can be required by some providers such as Clever Cloud.

description string
deserializer
All of: Serializer / Deserializer used for the value. string, Deserializer used for the value.
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

messageId string

The first message read will be the one immediately after the specified message. If no since or messageId are provided, we start at the beginning of the topic.

pollDuration string

If no records are available, the maximum duration to wait for a new record.

Default value is : 2.000000000

Default: 2.0
format=duration
schemaString string

Required for connecting with topics with a defined schema and strict schema checking

schemaType string

Can be one of NONE, AVRO or JSON. None means there will be no schema enforced.

Default value is : NONE

Default: "NONE"
Values: "NONE" "AVRO" "JSON"
since string

The broker will find the latest message that was published before the given duration. E.g., since set to 5 minutes (PT5M) indicates that the broker should find the message published 5 minutes in the past and set the initial position to that messageId.

format=duration
timeout string
format=duration
tlsOptions
All of: io.kestra.plugin.pulsar.AbstractPulsarConnection-TlsOptions object, TLS authentication options.
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.pulsar.RealtimeTrigger object

If you would like to consume multiple messages processed within a given time frame and process them in batch, you can use the io.kestra.plugin.pulsar.Trigger instead.##### Examples

Consume a message from a Pulsar topic in real-time.

id: pulsar
namespace: company.team

tasks:
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.value }}"

triggers:
  - id: realtime_trigger
    type: io.kestra.plugin.pulsar.RealtimeTrigger
    topic: kestra_trigger
    uri: pulsar://localhost:26650
    deserializer: JSON
    subscriptionName: kestra_trigger_sub

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
subscriptionName string required

Using subscription name, we will fetch only records that haven't been consumed yet.

topic required

Can be a string or a list of strings to consume from multiple topics.

type const: "io.kestra.plugin.pulsar.RealtimeTrigger" required
Constant: "io.kestra.plugin.pulsar.RealtimeTrigger"
uri string required

You need to specify a Pulsar protocol URL.

  • Example of localhost: pulsar://localhost:6650
  • If you have multiple brokers: pulsar://localhost:6650,localhost:6651,localhost:6652
  • If you use TLS authentication: pulsar+ssl://pulsar.us-west.example.com:6651
authenticationToken string

Authentication token that can be required by some providers such as Clever Cloud.

conditions array
consumerName string
consumerProperties Record<string, string>
description string
deserializer
All of: Serializer / Deserializer used for the value. string, Deserializer used for the value.
disabled boolean

Default value is : false

Default: false
encryptionKey string
initialPosition string

Default value is : Earliest

Default: "Earliest"
Values: "Latest" "Earliest"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
schemaString string

Required for connecting with topics with a defined schema and strict schema checking

schemaType string

Can be one of NONE, AVRO or JSON. None means there will be no schema enforced.

Default value is : NONE

Default: "NONE"
Values: "NONE" "AVRO" "JSON"
stopAfter string[]
subscriptionType string

Default value is : Exclusive

Default: "Exclusive"
Values: "Exclusive" "Shared" "Failover" "Key_Shared"
tlsOptions
All of: io.kestra.plugin.pulsar.AbstractPulsarConnection-TlsOptions object, TLS authentication options.
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.pulsar.Trigger object

Note that you don't need an extra task to consume the message from the event trigger. The trigger will automatically consume messages and you can retrieve their content in your flow using the {{ trigger.uri }} variable. If you would like to consume each message from a Pulsar topic in real-time and create one execution per message, you can use the io.kestra.plugin.pulsar.RealtimeTrigger instead.##### Examples

id: pulsar_trigger
namespace: company.team

tasks:
  - id: log
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.value }}"

triggers:
  - id: trigger
    type: io.kestra.plugin.pulsar.Trigger
    interval: PT30S
    topic: kestra_trigger
    uri: pulsar://localhost:26650
    deserializer: JSON
    subscriptionName: kestra_trigger_sub

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
subscriptionName string required

Using subscription name, we will fetch only records that haven't been consumed yet.

topic required

Can be a string or a list of strings to consume from multiple topics.

type const: "io.kestra.plugin.pulsar.Trigger" required
Constant: "io.kestra.plugin.pulsar.Trigger"
uri string required

You need to specify a Pulsar protocol URL.

  • Example of localhost: pulsar://localhost:6650
  • If you have multiple brokers: pulsar://localhost:6650,localhost:6651,localhost:6652
  • If you use TLS authentication: pulsar+ssl://pulsar.us-west.example.com:6651
authenticationToken string

Authentication token that can be required by some providers such as Clever Cloud.

conditions array
consumerName string
consumerProperties Record<string, string>
description string
deserializer
All of: Serializer / Deserializer used for the value. string, Deserializer used for the value.
disabled boolean

Default value is : false

Default: false
encryptionKey string
initialPosition string

Default value is : Earliest

Default: "Earliest"
Values: "Latest" "Earliest"
interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

pollDuration string

If no records are available, the maximum duration to wait for a new record.

Default value is : 2.000000000

Default: 2.0
format=duration
schemaString string

Required for connecting with topics with a defined schema and strict schema checking

schemaType string

Can be one of NONE, AVRO or JSON. None means there will be no schema enforced.

Default value is : NONE

Default: "NONE"
Values: "NONE" "AVRO" "JSON"
stopAfter string[]
subscriptionType string

Default value is : Exclusive

Default: "Exclusive"
Values: "Exclusive" "Shared" "Failover" "Key_Shared"
tlsOptions
All of: io.kestra.plugin.pulsar.AbstractPulsarConnection-TlsOptions object, TLS authentication options.
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.redis.list.ListPop object
Examples
id: redis_list_pop
namespace: company.team

tasks:
  - id: list_pop
    type: io.kestra.plugin.redis.list.ListPop
    url: redis://:redis@localhost:6379/0
    key: mypopkeyjson
    serdeType: JSON
    maxRecords: 1

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
key string required
type const: "io.kestra.plugin.redis.list.ListPop" required
Constant: "io.kestra.plugin.redis.list.ListPop"
url string required
allowFailure boolean

Default value is : false

Default: false
count integer

Default value is : 100

Default: 100
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

serdeType
All of: Serializer / Deserializer use for the value string, Format of the data contained in Redis.
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.redis.list.ListPush object
Examples
id: redis_list_push
namespace: company.team

tasks:
  - id: list_push
    type: io.kestra.plugin.redis.list.ListPush
    url: redis://:redis@localhost:6379/0
    key: mykey
    from:
      - value1
      - value2

from string | array required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
key string required
type const: "io.kestra.plugin.redis.list.ListPush" required
Constant: "io.kestra.plugin.redis.list.ListPush"
url string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
serdeType
All of: Serializer / Deserializer use for the value string, Format of the data contained in Redis
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.redis.list.RealtimeTrigger object

If you would like to consume multiple elements processed within a given time frame and process them in batch, you can use the io.kestra.plugin.redis.list.Trigger instead.##### Examples

Consume an element from the head of a list in real-time.

id: list_listen
namespace: company.team

tasks:
  - id: echo
    type: io.kestra.plugin.core.log.Log
    message: "Received '{{ trigger.value }}'"

triggers:
  - id: watch
    type: io.kestra.plugin.redis.list.RealtimeTrigger
    url: redis://localhost:6379/0
    key: mytriggerkey

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
key string required
type const: "io.kestra.plugin.redis.list.RealtimeTrigger" required
Constant: "io.kestra.plugin.redis.list.RealtimeTrigger"
url string required
conditions array
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
serdeType
All of: Serializer / Deserializer use for the value string, Format of the data contained in Redis
stopAfter string[]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.redis.list.Trigger object

If you would like to consume each message from a list in real-time and create one execution per message, you can use the io.kestra.plugin.redis.list.RealtimeTrigger instead.##### Examples

id: list_listen
namespace: company.team

tasks:
  - id: echo
    type: io.kestra.plugin.core.log.Log
    message: "{{ trigger.uri }} containing {{ trigger.count }} lines"

triggers:
  - id: watch
    type: io.kestra.plugin.redis.list.Trigger
    url: redis://localhost:6379/0
    key: mytriggerkey
    maxRecords: 2

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
key string required
type const: "io.kestra.plugin.redis.list.Trigger" required
Constant: "io.kestra.plugin.redis.list.Trigger"
url string required
conditions array
count integer

Default value is : 100

Default: 100
description string
disabled boolean

Default value is : false

Default: false
interval string

The interval between 2 different polls of schedule, this can avoid to overload the remote system with too many calls. For most of the triggers that depend on external systems, a minimal interval must be at least PT30S. See ISO_8601 Durations for more information of available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

It's not a hard limit and is evaluated every second.

format=duration
maxRecords integer

It's not a hard limit and is evaluated every second.

serdeType
All of: Serializer / Deserializer use for the value string, Format of the data contained in Redis
stopAfter string[]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.redis.pubsub.Publish object
Examples
id: redis_publish
namespace: company.team

tasks:
  - id: publish
    type: io.kestra.plugin.redis.pubsub.Publish
    url: redis://:redis@localhost:6379/0
    channel: mych
    from:
      - value1
      - value2

channel string required
from string | array required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.redis.pubsub.Publish" required
Constant: "io.kestra.plugin.redis.pubsub.Publish"
url string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
serdeType
All of: Serializer / Deserializer use for the value string, Format of the data contained in Redis.
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.redis.string.Delete object
Examples
id: redis_delete
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.redis.string.Delete
    url: redis://:redis@localhost:6379/0
    keys:
      - keyDelete1
      - keyDelete2

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
keys string[] required
type const: "io.kestra.plugin.redis.string.Delete" required
Constant: "io.kestra.plugin.redis.string.Delete"
url string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
failedOnMissing boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.redis.string.Get object
Examples
id: redis_get
namespace: company.team

tasks:
  - id: get
    type: io.kestra.plugin.redis.string.Get
    url: redis://:redis@localhost:6379/0
    key: mykey

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
key string required
type const: "io.kestra.plugin.redis.string.Get" required
Constant: "io.kestra.plugin.redis.string.Get"
url string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
serdeType
All of: Serializer / Deserializer use for the value string, Format of the data contained in Redis
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.redis.string.Set object
Examples
id: redis_set
namespace: company.team

tasks:
  - id: set
    type: io.kestra.plugin.redis.string.Set
    url: redis://:redis@localhost:6379/0
    key: mykey
    value: myvalue
    serdeType: STRING

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
key string required
type const: "io.kestra.plugin.redis.string.Set" required
Constant: "io.kestra.plugin.redis.string.Set"
url string required
value string required
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
get boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
options
All of: io.kestra.plugin.redis.string.Set-Options object, Options available when setting a key in Redis.
serdeType
All of: Serializer / Deserializer use for the value string, Format of the data contained in Redis.
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.redis.string.Set-Options object
expirationDate string
format=date-time
expirationDuration string
format=duration
keepTtl boolean
mustExist boolean
mustNotExist boolean
io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object
image string required
minLength=1
config string | object

Docker configuration file that can set access credentials to private container registries. Usually located in ~/.docker/config.json.

cpu
All of: io.kestra.plugin.scripts.runner.docker.Cpu object, Limits the CPU usage to a given maximum threshold value.
credentials
All of: Credentials for a private container registry. object, Credentials for a private container registry.
deviceRequests array
entryPoint string[]
extraHosts string[]
host string
memory
All of: io.kestra.plugin.scripts.runner.docker.Memory object, Limits memory usage to a given maximum threshold value.
networkMode string
pullPolicy string

Default value is : ALWAYS

Default: "ALWAYS"
Values: "IF_NOT_PRESENT" "ALWAYS" "NEVER"
shmSize string

The size must be greater than 0. If omitted, the system uses 64MB.

user string
volumes string[]

Must be a valid mount expression as string, example : /home/user:/app.

Volumes mount are disabled by default for security reasons; you must enable them on server configuration by setting kestra.tasks.scripts.docker.volume-enabled to true.

io.kestra.plugin.scripts.groovy.Eval object
Examples

Make an API call and pass request body to a Groovy script.

id: api_request_to_groovy
namespace: company.team

tasks:
  - id: request
    type: io.kestra.plugin.core.http.Request
    uri: "https://dummyjson.com/products/1"

  - id: groovy
    type: io.kestra.plugin.scripts.groovy.Eval
    script: |
      logger.info('{{ outputs.request.body }}')

  - id: download
    type: io.kestra.plugin.core.http.Download
    uri: "https://dummyjson.com/products/1"

  - id: run_context_groovy
    type: io.kestra.plugin.scripts.groovy.Eval
    script: |
      // logger.info('Vars: {}', runContext.getVariables())
      URI uri = new URI(runContext.variables.outputs.download.uri)
      InputStream istream = runContext.storage().getFile(uri)
      logger.info('Content: {}', istream.text)

id: groovy_eval
namespace: company.team

tasks:
  - id: eval
    type: io.kestra.plugin.scripts.groovy.Eval
    outputs:
      - out
      - map
    script: |
      import io.kestra.core.models.executions.metrics.Counter

      logger.info('executionId: {}', runContext.render('{{ execution.id }}'))
      runContext.metric(Counter.of('total', 666, 'name', 'bla'))

      map = Map.of('test', 'here')
      File tempFile = runContext.workingDir().createTempFile().toFile()
      var output = new FileOutputStream(tempFile)
      output.write('555\n666\n'.getBytes())

      out = runContext.storage().putFile(tempFile)

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.groovy.Eval" required
Constant: "io.kestra.plugin.scripts.groovy.Eval"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
outputs string[]
script string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.groovy.FileTransform object

This allows you to transform the data, previously loaded by Kestra, as you need.

Take a ion format file from Kestra and iterate row per row. Each row will populate a row global variable. You need to alter this variable that will be saved on output file. If you set the row to null, the row will be skipped. You can create a variable rows to return multiple rows for a single row.

Examples

Convert row by row of a file from Kestra's internal storage.

id: groovy_file_transform
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: file_transform
    type: io.kestra.plugin.scripts.groovy.FileTransform
    from: "{{ inputs.file }}"
    script: |
      logger.info('row: {}', row)

      if (row.get('name') == 'richard') {
        row = null
      } else {
        row.put('email', row.get('name') + '@kestra.io')
      }

Create multiple rows from one row.

id: groovy_file_transform
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: file_transform
    type: io.kestra.plugin.scripts.groovy.FileTransform
    from: "{{ inputs.file }}"
    script: |
      logger.info('row: {}', row)
      rows = [["action", "insert"], row]

Transform a JSON string to a file.

id: groovy_file_transform
namespace: company.team

inputs:
  - id: json
    type: JSON
    defaults: [{"name":"jane"}, {"name":"richard"}]

tasks:
  - id: file_transform
    type: io.kestra.plugin.scripts.groovy.FileTransform
    from: "{{ inputs.json }}"
    script: |
      logger.info('row: {}', row)

      if (row.get('name') == 'richard') {
        row = null
      } else {
        row.put('email', row.get('name') + '@kestra.io')
      }

JSON transformations using jackson library

id: json_transform_using_jackson
namespace: company.team

tasks:
  - id: file_transform
    type: io.kestra.plugin.scripts.groovy.FileTransform
    from: '[{"name":"John Doe", "age":99, "embedded":{"foo":"bar"}}]'
    script: |
      import com.fasterxml.jackson.*

      def mapper = new databind.ObjectMapper();
      def jsonStr = mapper.writeValueAsString(row);
      logger.info('input in json str: {}', jsonStr)

      def typeRef = new core.type.TypeReference<HashMap<String,Object>>() {};

      data = mapper.readValue(jsonStr, typeRef);

      logger.info('json object: {}', data);
      logger.info('embedded field: {}', data.embedded.foo)

from string required

Can be Kestra's internal storage URI, a map or a list.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.groovy.FileTransform" required
Constant: "io.kestra.plugin.scripts.groovy.FileTransform"
allowFailure boolean

Default value is : false

Default: false
concurrent integer

Take care that the order is not respected if you use parallelism.

min=2
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
script string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.jbang.Commands object
Examples

Execute JBang command to execute a JAR file.

id: jbang_commands
namespace: company.team

tasks:
  - id: commands
    type: io.kestra.plugin.scripts.jbang.Commands
    commands:
      - jbang --quiet --main picocli.codegen.aot.graalvm.ReflectionConfigGenerator info.picocli:picocli-codegen:4.6.3

commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.jbang.Commands" required
Constant: "io.kestra.plugin.scripts.jbang.Commands"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : jbangdev/jbang-action

Default: "jbangdev/jbang-action"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false, all commands will be executed one after the other. The final state of task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the set -e option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.jbang.Script object
Examples

Execute a script written in Java

id: jbang_script
namespace: company.team

tasks:
  - id: script
    type: io.kestra.plugin.scripts.jbang.Script
    script: |
      class helloworld {
          public static void main(String[] args) {
              if(args.length==0) {
                  System.out.println("Hello World!");
              } else {
                  System.out.println("Hello " + args[0]);
              }
          }
      }

Execute a script written in Java with dependencies

id: jbang_script
namespace: company.team

tasks:
  - id: script_with_dependency
    type: io.kestra.plugin.scripts.jbang.Script
    script: |
      //DEPS ch.qos.reload4j:reload4j:1.2.19

      import org.apache.log4j.Logger;
      import org.apache.log4j.BasicConfigurator;

      class classpath_example {

        static final Logger logger = Logger.getLogger(classpath_example.class);

        public static void main(String[] args) {
          BasicConfigurator.configure(); 
          logger.info("Hello World");
        }
      }

Execute a script written in Kotlin.

id: jbang_script
namespace: company.team

tasks:
  - id: script_kotlin
    type: io.kestra.plugin.scripts.jbang.Script
    extension: .kt
    script: |
      public fun main() {
          println("Hello World");
      }

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
script string required
minLength=1
type const: "io.kestra.plugin.scripts.jbang.Script" required
Constant: "io.kestra.plugin.scripts.jbang.Script"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : jbangdev/jbang-action

Default: "jbangdev/jbang-action"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
extension string

JBang support more than Java scripts, you can use it with JShell (.jsh), Kotlin (.kt), Groovy (.groovy) or even Markdowns (.md).

Default value is : .java

Default: ".java"
minLength=1
failFast boolean

If set to false, all commands will be executed one after the other. The final state of task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the set -e option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

quiet boolean

By default, JBang logs in stderr so quiet is configured to true by default so no JBang logs are shown except errors.

Default value is : true

Default: true
runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.julia.Commands object
Examples

Create a Julia script, install required packages and execute it. Note that instead of defining the script inline, you could create the Julia script in the embedded VS Code editor and point to its location by path. If you do so, make sure to enable namespace files by setting the enabled flag of the namespaceFiles property to true.

id: julia_commands
namespace: company.team

tasks:
  - id: commands
    type: io.kestra.plugin.scripts.julia.Commands
    warningOnStdErr: false
    inputFiles:
      main.jl: |
        using DataFrames, CSV
        df = DataFrame(Name = ["Alice", "Bob", "Charlie"], Age = [25, 30, 35])
        CSV.write("output.csv", df)
    outputFiles:
      - output.csv
    beforeCommands:
      - julia -e 'using Pkg; Pkg.add("DataFrames"); Pkg.add("CSV")'
    commands:
      - julia main.jl

commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.julia.Commands" required
Constant: "io.kestra.plugin.scripts.julia.Commands"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : julia

Default: "julia"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false all commands will be executed one after the other. The final state of task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the set -e option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.julia.Script object
Examples

Create a Julia script, install required packages and execute it. Note that instead of defining the script inline, you could create the Julia script in the embedded VS Code editor and read its content using the {{ read('your_script.jl') }} function.

id: julia_script
namespace: company.team

tasks:
  - id: script
    type: io.kestra.plugin.scripts.julia.Script
    warningOnStdErr: false
    script: |
      using DataFrames, CSV
      df = DataFrame(Name = ["Alice", "Bob", "Charlie"], Age = [25, 30, 35])
      CSV.write("output.csv", df)
    outputFiles:
      - output.csv
    beforeCommands:
      - julia -e 'using Pkg; Pkg.add("DataFrames"); Pkg.add("CSV")'

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
script string required
minLength=1
type const: "io.kestra.plugin.scripts.julia.Script" required
Constant: "io.kestra.plugin.scripts.julia.Script"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : julia

Default: "julia"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false all commands will be executed one after the other. The final state of task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the set -e option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.jython.Eval object
Examples
id: jython_eval
namespace: company.team

tasks:
  - id: eval
    type: io.kestra.plugin.scripts.jython.Eval
    outputs:
      - out
      - map
    script: |
      from io.kestra.core.models.executions.metrics import Counter
      import tempfile
      from java.io import File

      logger.info('executionId: {}', runContext.render('{{ execution.id }}'))
      runContext.metric(Counter.of('total', 666, 'name', 'bla'))

      map = {'test': 'here'}
      tempFile = tempfile.NamedTemporaryFile()
      tempFile.write('555\n666\n')

      out = runContext.storage().putFile(File(tempFile.name))

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.jython.Eval" required
Constant: "io.kestra.plugin.scripts.jython.Eval"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
outputs string[]
script string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.jython.FileTransform object

This allows you to transform the data, previously loaded by Kestra, as you need.

Take an ION format file from Kestra and iterate over it row by row. Each row will populate a row global variable. You need to alter this variable; it will be saved in the output file. If you set the row to null, the row will be skipped. You can create a variable rows to return multiple rows for a single row.

Examples

Extract data from an API, add a column, and store it as a downloadable CSV file.

id: etl_api_to_csv
namespace: company.team

tasks:
  - id: download
    type: io.kestra.plugin.fs.http.Download
    uri: https://gorest.co.in/public/v2/users

  - id: ion_to_json
    type: io.kestra.plugin.serdes.json.JsonToIon
    from: "{{ outputs.download.uri }}"
    newLine: false

  - id: write_json
    type: io.kestra.plugin.serdes.json.IonToJson
    from: "{{ outputs.ion_to_json.uri }}"

  - id: add_column
    type: io.kestra.plugin.scripts.jython.FileTransform
    from: "{{ outputs.write_json.uri }}"
    script: |
      from datetime import datetime
      logger.info('row: {}', row)
      row['inserted_at'] = datetime.utcnow()

  - id: csv
    type: io.kestra.plugin.serdes.csv.IonToCsv
    from: "{{ outputs.add_column.uri }}"

Transform with file from internal storage.

id: jython_file_transform
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: file_transform
    type: io.kestra.plugin.scripts.jython.FileTransform
    from: "{{ inputs.file }}"
    script: |
      logger.info('row: {}', row)

      if row['name'] == 'richard':
        row = None
      else:
        row['email'] = row['name'] + '@kestra.io'

Transform with file from JSON string.

id: jython_file_transform
namespace: company.team

inputs:
  - id: json
    type: JSON
    defaults: {"name": "john"}

tasks:
  - id: file_transform
    type: io.kestra.plugin.scripts.jython.FileTransform
    from: "{{ inputs.json }}"
    script: |
      logger.info('row: {}', row)

      if row['name'] == 'richard':
        row = None
      else:
        row['email'] = row['name'] + '@kestra.io'

from string required

Can be Kestra's internal storage URI, a map or a list.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.jython.FileTransform" required
Constant: "io.kestra.plugin.scripts.jython.FileTransform"
allowFailure boolean

Default value is : false

Default: false
concurrent integer

Take care that the order is not respected if you use parallelism.

min=2
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
script string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.nashorn.Eval object
Examples
id: nashorn_eval
namespace: company.team

tasks:
  - id: eval
    type: io.kestra.plugin.scripts.nashorn.Eval
    outputs:
      - out
      - map
    script: |
      var Counter = Java.type('io.kestra.core.models.executions.metrics.Counter');
      var File = Java.type('java.io.File');
      var FileOutputStream = Java.type('java.io.FileOutputStream');

      logger.info('executionId: {}', runContext.render('{{ execution.id }}'));
      runContext.metric(Counter.of('total', 666, 'name', 'bla'));

      map = {'test': 'here'}
      var tempFile = runContext.workingDir().createTempFile().toFile()
      var output = new FileOutputStream(tempFile)
      output.write('555\n666\n'.getBytes())

      out = runContext.storage().putFile(tempFile)

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.nashorn.Eval" required
Constant: "io.kestra.plugin.scripts.nashorn.Eval"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
outputs string[]
script string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.nashorn.FileTransform object
Examples

Transform with file from internal storage

id: nashorn_file_transform
namespace: company.team

tasks:
  - id: file_transform
    type: io.kestra.plugin.scripts.nashorn.FileTransform
    from: "{{ outputs['avro-to-gcs'] }}"
    script: |
      logger.info('row: {}', row)

      if (row['name'] === 'richard') {
        row = null
      } else {
        row['email'] = row['name'] + '@kestra.io'
      }

Transform JSON string input with a Nashorn script.

id: nashorn_file_transform
namespace: company.team

tasks:
  - id: file_transform
    type: io.kestra.plugin.scripts.nashorn.FileTransform
    from: '[{"name":"jane"}, {"name":"richard"}]'
    script: |
      logger.info('row: {}', row)

      if (row['name'] === 'richard') {
        row = null
      } else {
        row['email'] = row['name'] + '@kestra.io'
      }

from string required

Can be Kestra's internal storage URI, a map or a list.

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.nashorn.FileTransform" required
Constant: "io.kestra.plugin.scripts.nashorn.FileTransform"
allowFailure boolean

Default value is : false

Default: false
concurrent integer

Take care that the order is not respected if you use parallelism.

min=2
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
script string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.node.Commands object
Examples

Install required npm packages, create a Node.js script and execute it.

id: nodejs_commands
namespace: company.team

tasks:
  - id: commands
    type: io.kestra.plugin.scripts.node.Commands
    inputFiles:
      main.js: |
        const colors = require("colors");
        console.log(colors.red("Hello"));
    beforeCommands:
      - npm install colors
    commands:
      - node main.js
    warningOnStdErr: false

commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.node.Commands" required
Constant: "io.kestra.plugin.scripts.node.Commands"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : node

Default: "node"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false all commands will be executed one after the other. The final state of task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the set -e option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.node.Script object
Examples

Install package, create a Node.js script and execute it.

id: nodejs_script
namespace: company.team

tasks:
  - id: script
    type: io.kestra.plugin.scripts.node.Script
    beforeCommands:
      - npm install colors
    script: |
      const colors = require("colors");
      console.log(colors.red("Hello"));
    warningOnStdErr: false

If you want to generate files in your script to make them available for download and use in downstream tasks, you can leverage the {{ outputDir }} variable. Files stored in that directory will be persisted in Kestra's internal storage. To access this output in downstream tasks, use the syntax {{ outputs.yourTaskId.outputFiles['yourFileName.fileExtension'] }}.

Alternatively, instead of the {{ outputDir }} variable, you could use the outputFiles property to output files from your script. You can access those files in downstream tasks using the same syntax {{ outputs.yourTaskId.outputFiles['yourFileName.fileExtension'] }}, and you can download the files from the UI's Output tab.

id: nodejs_script
namespace: company.team

tasks:
  - id: node
    type: io.kestra.plugin.scripts.node.Script
    warningOnStdErr: false
    beforeCommands:
        - npm install json2csv > /dev/null 2>&1
    script: |
        const fs = require('fs');
        const { Parser } = require('json2csv');

        // Product prices in our simulation
        const productPrices = {
            'T-shirt': 20,
            'Jeans': 75,
            'Shoes': 80,
            'Socks': 5,
            'Hat': 25
        }

        const generateOrder = () => {
            const products = ['T-shirt', 'Jeans', 'Shoes', 'Socks', 'Hat'];
            const statuses = ['pending', 'shipped', 'delivered', 'cancelled'];

            const randomProduct = products[Math.floor(Math.random() * products.length)];
            const randomStatus = statuses[Math.floor(Math.random() * statuses.length)];
            const randomQuantity = Math.floor(Math.random() * 10) + 1;

            const order = {
                product: randomProduct,
                status: randomStatus,
                quantity: randomQuantity,
                total: randomQuantity * productPrices[randomProduct]
            };

            return order;
        }

        let totalSales = 0;
        let orders = [];

        for (let i = 0; i < 100; i++) {
            const order = generateOrder();
            orders.push(order);
            totalSales += order.total;
        }

        console.log(`Total sales: $${totalSales}`);

        const fields = ['product', 'status', 'quantity', 'total'];
        const json2csvParser = new Parser({ fields });
        const csvData = json2csvParser.parse(orders);

        fs.writeFileSync('{{ outputDir }}/orders.csv', csvData);

        console.log('Orders saved to orders.csv');

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
script string required
minLength=1
type const: "io.kestra.plugin.scripts.node.Script" required
Constant: "io.kestra.plugin.scripts.node.Script"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : node

Default: "node"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false all commands will be executed one after the other. The final state of task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the set -e option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.powershell.Commands object
Examples

Execute PowerShell commands.

id: execute_powershell_commands
namespace: company.team

tasks:
  - id: powershell
    type: io.kestra.plugin.scripts.powershell.Commands
    inputFiles:
      main.ps1: |
        'Hello, World!' | Write-Output
    commands:
      - ./main.ps1

commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.powershell.Commands" required
Constant: "io.kestra.plugin.scripts.powershell.Commands"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : ghcr.io/kestra-io/powershell:latest

Default: "ghcr.io/kestra-io/powershell:latest"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false all commands will be executed one after the other. The final state of task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the set -e option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- pwsh

  • -NoProfile
  • -NonInteractive
  • -Command`

Default value is : `- pwsh

  • -NoProfile
  • -NonInteractive
  • -Command`
Default:
[
  "pwsh",
  "-NoProfile",
  "-NonInteractive",
  "-Command"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.powershell.Script object
Examples

Execute a PowerShell script.

id: execute_powershell_script
namespace: company.team

tasks:
  - id: powershell
    type: io.kestra.plugin.scripts.powershell.Script
    script: |
      'Hello, World!' | Write-Output

If you want to generate files in your script to make them available for download and use in downstream tasks, you can leverage the {{ outputDir }} variable. Files stored in that directory will be persisted in Kestra's internal storage. To access this output in downstream tasks, use the syntax {{ outputs.yourTaskId.outputFiles['yourFileName.fileExtension'] }}.

id: powershell_generate_files
namespace: company.team

tasks:
  - id: powershell
    type: io.kestra.plugin.scripts.powershell.Script
    script: |
      Set-Content -Path {{ outputDir }}\hello.txt -Value "Hello World"

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
script string required
minLength=1
type const: "io.kestra.plugin.scripts.powershell.Script" required
Constant: "io.kestra.plugin.scripts.powershell.Script"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : ghcr.io/kestra-io/powershell:latest

Default: "ghcr.io/kestra-io/powershell:latest"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false all commands will be executed one after the other. The final state of task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the set -e option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- pwsh

  • -NoProfile
  • -NonInteractive
  • -Command`

Default value is : `- pwsh

  • -NoProfile
  • -NonInteractive
  • -Command`
Default:
[
  "pwsh",
  "-NoProfile",
  "-NonInteractive",
  "-Command"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.python.Commands object
Examples

Execute a Python script in a Conda virtual environment. First, add the following script in the embedded Code Editor and name it etl_script.py:

import argparse

parser = argparse.ArgumentParser()

parser.add_argument("--num", type=int, default=42, help="Enter an integer")

args = parser.parse_args()
result = args.num * 2
print(result)

Then, make sure to set the enabled flag of the namespaceFiles property to true to enable namespace files. We include only the etl_script.py file as that is the only file we require from namespace files.

This flow uses a io.kestra.plugin.core.runner.Process Task Runner and Conda virtual environment for process isolation and dependency management. However, note that, by default, Kestra runs tasks in a Docker container (i.e. a Docker task runner), and you can use the taskRunner property to customize many options, as well as containerImage to choose the Docker image to use.

id: python_venv
namespace: company.team

tasks:
  - id: python
    type: io.kestra.plugin.scripts.python.Commands
    namespaceFiles:
      enabled: true
      include:
        - etl_script.py
    taskRunner:
      type: io.kestra.plugin.core.runner.Process
    beforeCommands:
      - conda activate myCondaEnv
    commands:
      - python etl_script.py

Execute a Python script from Git in a Docker container and output a file

id: python_commands_example
namespace: company.team

tasks:
  - id: wdir
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/examples
        branch: main

      - id: git_python_scripts
        type: io.kestra.plugin.scripts.python.Commands
        warningOnStdErr: false
        containerImage: ghcr.io/kestra-io/pydata:latest
        beforeCommands:
          - pip install faker > /dev/null
        commands:
          - python examples/scripts/etl_script.py
          - python examples/scripts/generate_orders.py
        outputFiles:
          - orders.csv

  - id: load_csv_to_s3
    type: io.kestra.plugin.aws.s3.Upload
    accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID') }}"
    secretKeyId: "{{ secret('AWS_SECRET_ACCESS_KEY') }}"
    region: eu-central-1
    bucket: kestraio
    key: stage/orders.csv
    from: "{{ outputs.git_python_scripts.outputFiles['orders.csv'] }}"

Execute a Python script on a remote worker with a GPU

id: gpu_task
namespace: company.team

tasks:
  - id: python
    type: io.kestra.plugin.scripts.python.Commands
    taskRunner:
      type: io.kestra.plugin.core.runner.Process
    commands:
      - python ml_on_gpu.py
    workerGroup:
      key: gpu

Pass detected S3 objects from the event trigger to a Python script

id: s3_trigger_commands
namespace: company.team
description: process CSV file from S3 trigger

tasks:
  - id: wdir
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/examples
        branch: main

      - id: python
        type: io.kestra.plugin.scripts.python.Commands
        inputFiles:
          data.csv: "{{ trigger.objects | jq('.[].uri') | first }}"
        description: this script reads a file `data.csv` from S3 trigger
        containerImage: ghcr.io/kestra-io/pydata:latest
        warningOnStdErr: false
        commands:
          - python examples/scripts/clean_messy_dataset.py
        outputFiles:
          - "*.csv"
          - "*.parquet"

triggers:
  - id: wait_for_s3_object
    type: io.kestra.plugin.aws.s3.Trigger
    bucket: declarative-orchestration
    maxKeys: 1
    interval: PT1S
    filter: FILES
    action: MOVE
    prefix: raw/
    moveTo:
      key: archive/raw/
    accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID') }}"
    secretKeyId: "{{ secret('AWS_SECRET_ACCESS_KEY') }}"
    region: "{{ secret('AWS_DEFAULT_REGION') }}"

Execute a Python script from Git using a private Docker container image

id: python_in_container
namespace: company.team

tasks:
  - id: wdir
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/kestra-io/examples
        branch: main

      - id: git_python_scripts
        type: io.kestra.plugin.scripts.python.Commands
        warningOnStdErr: false
        commands:
          - python examples/scripts/etl_script.py
        outputFiles:
          - "*.csv"
          - "*.parquet"
        containerImage: annageller/kestra:latest
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
          config: |
            {
              "auths": {
                  "https://index.docker.io/v1/": {
                      "username": "annageller",
                      "password": "{{ secret('DOCKER_PAT') }}"
                  }
              }
            }

Create a python script and execute it in a virtual environment

id: script_in_venv
namespace: company.team
tasks:
  - id: python
    type: io.kestra.plugin.scripts.python.Commands
    inputFiles:
      main.py: |
        import requests
        from kestra import Kestra

        response = requests.get('https://google.com')
        print(response.status_code)
        Kestra.outputs({'status': response.status_code, 'text': response.text})
    beforeCommands:
      - python -m venv venv
      - . venv/bin/activate
      - pip install requests kestra > /dev/null
    commands:
      - python main.py

commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.python.Commands" required
Constant: "io.kestra.plugin.scripts.python.Commands"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : ghcr.io/kestra-io/kestrapy:latest

Default: "ghcr.io/kestra-io/kestrapy:latest"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false, all commands will be executed one after the other. The final state of the task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the `set -e` option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.python.Script object
Examples

Execute a Python script and generate an output.

id: python_use_input_file
namespace: company.team

tasks:
  - id: python
    type: io.kestra.plugin.scripts.python.Script
    script: |
      from kestra import Kestra
      import requests

      response = requests.get('https://kestra.io')
      print(response.status_code)

      Kestra.outputs({'status': response.status_code, 'text': response.text})
    beforeCommands:
      - pip install requests kestra

Log messages at different log levels using Kestra logger.

id: python_logs
namespace: company.team

tasks:
  - id: python_logger
    type: io.kestra.plugin.scripts.python.Script
    allowFailure: true
    warningOnStdErr: false
    script: |
      import time
      from kestra import Kestra

      logger = Kestra.logger()

      logger.debug("DEBUG is used for diagnostic info.")
      time.sleep(0.5)

      logger.info("INFO confirms normal operation.")
      time.sleep(0.5)

      logger.warning("WARNING signals something unexpected.")
      time.sleep(0.5)

      logger.error("ERROR indicates a serious issue.")
      time.sleep(0.5)

      logger.critical("CRITICAL means a severe failure.")

Execute a Python script with an input file from Kestra's local storage created by a previous task.

id: python_use_input_file
namespace: company.team

tasks:
  - id: python
    type: io.kestra.plugin.scripts.python.Script
    script: |
      with open('{{ outputs.previousTaskId.uri }}', 'r') as f:
        print(f.read())

Execute a Python script that outputs a file.

id: python_output_file
namespace: company.team

tasks:
  - id: python
    type: io.kestra.plugin.scripts.python.Script
    script: |
       f = open("{{ outputDir }}/myfile.txt", "a")
       f.write("Hello from a Kestra task!")
       f.close()

If you want to generate files in your script to make them available for download and use in downstream tasks, you can leverage the {{outputDir}} expression. Files stored in that directory will be persisted in Kestra's internal storage. The first task in this example creates a file 'myfile.txt' and the next task can access it by leveraging the syntax {{outputs.yourTaskId.outputFiles['yourFileName.fileExtension']}}.

id: python_outputs
namespace: company.team

tasks:
  - id: clean_dataset
    type: io.kestra.plugin.scripts.python.Script
    containerImage: ghcr.io/kestra-io/pydata:latest
    script: |
      import pandas as pd
      df = pd.read_csv("https://huggingface.co/datasets/kestra/datasets/raw/main/csv/messy_dataset.csv")

      # Replace non-numeric age values with NaN
      df["Age"] = pd.to_numeric(df["Age"], errors="coerce")

      # mean imputation: fill NaN values with the mean age
      mean_age = int(df["Age"].mean())
      print(f"Filling NULL values with mean: {mean_age}")
      df["Age"] = df["Age"].fillna(mean_age)
      df.to_csv("{{ outputDir }}/clean_dataset.csv", index=False)

  - id: readFileFromPython
    type: io.kestra.plugin.scripts.shell.Commands
    taskRunner:
      type: io.kestra.plugin.core.runner.Process
    commands:
      - head -n 10 {{ outputs.clean_dataset.outputFiles['clean_dataset.csv'] }}

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
script string required
minLength=1
type const: "io.kestra.plugin.scripts.python.Script" required
Constant: "io.kestra.plugin.scripts.python.Script"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : ghcr.io/kestra-io/kestrapy:latest

Default: "ghcr.io/kestra-io/kestrapy:latest"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false, all commands will be executed one after the other. The final state of the task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the `set -e` option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.r.Commands object
Examples

Create an R script, install required packages and execute it. Note that instead of defining the script inline, you could create the script as a dedicated R script in the embedded VS Code editor and point to its location by path. If you do so, make sure to enable namespace files by setting the enabled flag of the namespaceFiles property to true.

id: r_commands
namespace: company.team

tasks:
  - id: r
    type: io.kestra.plugin.scripts.r.Commands
    inputFiles:
      main.R: |
        library(lubridate)
        ymd("20100604");
        mdy("06-04-2011");
        dmy("04/06/2012")
    beforeCommands:
      - Rscript -e 'install.packages("lubridate")'
    commands:
      - Rscript main.R

commands string[] required
minItems=1
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.r.Commands" required
Constant: "io.kestra.plugin.scripts.r.Commands"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : r-base

Default: "r-base"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false, all commands will be executed one after the other. The final state of the task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the `set -e` option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.r.Script object
Examples

Install a package and execute an R script

script: |
  library(lubridate)
  ymd("20100604");
  mdy("06-04-2011");
  dmy("04/06/2012")
beforeCommands:
  - Rscript -e 'install.packages("lubridate")'

Add an R script in the embedded VS Code editor, install required packages and execute it.

Here is an example R script that you can add in the embedded VS Code editor. You can name the script file main.R:

library(dplyr)
library(arrow)

data(mtcars) # load mtcars data
print(head(mtcars))

final <- mtcars %>%
    summarise(
    avg_mpg = mean(mpg),
    avg_disp = mean(disp),
    avg_hp = mean(hp),
    avg_drat = mean(drat),
    avg_wt = mean(wt),
    avg_qsec = mean(qsec),
    avg_vs = mean(vs),
    avg_am = mean(am),
    avg_gear = mean(gear),
    avg_carb = mean(carb)
    )
final %>% print()
write.csv(final, "final.csv")

mtcars_clean <- na.omit(mtcars) # this line removes rows with NA values
write_parquet(mtcars_clean, "mtcars_clean.parquet")

Note that tasks in Kestra are stateless. Therefore, the files generated by a task, such as the CSV and Parquet files in the example above, are not persisted in Kestra's internal storage, unless you explicitly tell Kestra to do so. Make sure to add the outputFiles property to your task as shown below to persist the generated Parquet file (or any other file) in Kestra's internal storage and make them visible in the Outputs tab.

To access this output in downstream tasks, use the syntax {{outputs.yourTaskId.outputFiles['yourFileName.fileExtension']}}. Alternatively, you can wrap your tasks that need to pass data between each other in a WorkingDirectory task — this way, those tasks will share the same working directory and will be able to access the same files.

Note how we use the read function to read the content of the R script stored as a Namespace File.

Finally, note that the docker property is optional. If you don't specify it, Kestra will use the default R image. If you want to use a different image, you can specify it in the docker property as shown below.

id: r_cars
namespace: company.team

tasks:
  - id: r
    type: io.kestra.plugin.scripts.r.Script
    warningOnStdErr: false
    containerImage: ghcr.io/kestra-io/rdata:latest
    script: "{{ read('main.R') }}"
    outputFiles:
      - "*.csv"
      - "*.parquet"

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
script string required
minLength=1
type const: "io.kestra.plugin.scripts.r.Script" required
Constant: "io.kestra.plugin.scripts.r.Script"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : r-base

Default: "r-base"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false, all commands will be executed one after the other. The final state of the task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the `set -e` option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.ruby.Commands object
Examples

Create a Ruby script and execute it. The easiest way to create a Ruby script is to use the embedded VS Code editor. Create a file named main.rb and paste the following code:

require 'csv'
require 'json'

file = File.read('data.json')
data_hash = JSON.parse(file)

# Extract headers
headers = data_hash.first.keys

# Convert hashes to arrays
data = data_hash.map(&:values)

# Prepend headers to data
data.unshift(headers)

# Create and write data to CSV file
CSV.open('output.csv', 'wb') do |csv|
data.each { |row| csv << row }
end

In order to read that script from the Namespace File called main.rb, you need to enable the namespaceFiles property. We include only main.rb as that is the only file we want from the namespaceFiles.

Also, note how we use the inputFiles option to read additional files into the script's working directory. In this case, we read the data.json file, which contains the data that we want to convert to CSV.

Finally, we use the outputFiles option to specify that we want to output the output.csv file that is generated by the script. This allows us to access the file in the UI's Output tab and download it, or pass it to other tasks.

id: generate_csv
namespace: company.team

tasks:
  - id: bash
    type: io.kestra.plugin.scripts.ruby.Commands
    namespaceFiles:
      enabled: true
      include:
        - main.rb
    inputFiles:
      data.json: |
        [
            {"Name": "Alice", "Age": 30, "City": "New York"},
            {"Name": "Bob", "Age": 22, "City": "Los Angeles"},
            {"Name": "Charlie", "Age": 35, "City": "Chicago"}
        ]
    beforeCommands:
      - ruby -v
    commands:
      - ruby main.rb
    outputFiles:
      - "*.csv"

commands string[] required
minItems=1
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.ruby.Commands" required
Constant: "io.kestra.plugin.scripts.ruby.Commands"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : ruby

Default: "ruby"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false, all commands will be executed one after the other. The final state of the task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the `set -e` option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.ruby.Script object
Examples

Create a Ruby script and execute it. The easiest way to create a Ruby script is to use the embedded VS Code editor. Create a file named main.rb and paste the following code:

require 'csv'
require 'json'

file = File.read('data.json')
data_hash = JSON.parse(file)

# Extract headers
headers = data_hash.first.keys

# Convert hashes to arrays
data = data_hash.map(&:values)

# Prepend headers to data
data.unshift(headers)

# Create and write data to CSV file
CSV.open('output.csv', 'wb') do |csv|
data.each { |row| csv << row }
end

In order to read that script from the Namespace File called main.rb, you can leverage the {{ read('main.rb') }} function.

Also, note how we use the inputFiles option to read additional files into the script's working directory. In this case, we read the data.json file, which contains the data that we want to convert to CSV.

Finally, we use the outputFiles option to specify that we want to output the output.csv file that is generated by the script. This allows us to access the file in the UI's Output tab and download it, or pass it to other tasks.

id: generate_csv
namespace: company.team

tasks:
  - id: bash
    type: io.kestra.plugin.scripts.ruby.Script
    inputFiles:
      data.json: |
        [
            {"Name": "Alice", "Age": 30, "City": "New York"},
            {"Name": "Bob", "Age": 22, "City": "Los Angeles"},
            {"Name": "Charlie", "Age": 35, "City": "Chicago"}
        ]
    beforeCommands:
      - ruby -v
    script: "{{ read('main.rb') }}"
    outputFiles:
      - "*.csv"

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
script string required
minLength=1
type const: "io.kestra.plugin.scripts.ruby.Script" required
Constant: "io.kestra.plugin.scripts.ruby.Script"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : ruby

Default: "ruby"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false, all commands will be executed one after the other. The final state of the task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the `set -e` option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.runner.docker.Cpu object
cpus integer

Make sure to set that to a numeric value, e.g. cpus: "1.5" or cpus: "4". For instance, if the host machine has two CPUs and you set cpus: "1.5", the container is guaranteed at most one and a half of the CPUs.

io.kestra.plugin.scripts.runner.docker.Credentials object
auth string

The auth field is a base64-encoded authentication string of username:password or a token.

identityToken string
password string
registry string

If not defined, the registry will be extracted from the image name.

registryToken string
username string
io.kestra.plugin.scripts.runner.docker.DeviceRequest object
capabilities string[][]
count integer
deviceIds string[]
driver string
options object

These options are passed directly to the driver.

io.kestra.plugin.scripts.runner.docker.Docker object

This task runner executes tasks in a container-based Docker-compatible engine. Use the containerImage property to configure the image for the task.

To access the task's working directory, use the {{workingDir}} Pebble expression or the WORKING_DIR environment variable. Input files and namespace files added to the task will be accessible from that directory.

To generate output files, we recommend using the outputFiles task's property. This allows you to explicitly define which files from the task's working directory should be saved as output files.

Alternatively, when writing files in your task, you can leverage the {{outputDir}} Pebble expression or the OUTPUT_DIR environment variable. All files written to that directory will be saved as output files automatically.

##### Examples

Execute a Shell command.

id: simple_shell_example
namespace: company.team

tasks:
  - id: shell
    type: io.kestra.plugin.scripts.shell.Commands
    taskRunner:
      type: io.kestra.plugin.scripts.runner.docker.Docker
    commands:
    - echo "Hello World"

Pass input files to the task, execute a Shell command, then retrieve output files.

id: shell_example_with_files
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: shell
    type: io.kestra.plugin.scripts.shell.Commands
    inputFiles:
      data.txt: "{{ inputs.file }}"
    outputFiles:
      - "*.txt"
    containerImage: centos
    taskRunner:
      type: io.kestra.plugin.scripts.runner.docker.Docker
    commands:
    - cp {{ workingDir }}/data.txt {{ workingDir }}/out.txt

Run a Python script in Docker and allocate a specific amount of memory.

id: allocate_memory_to_python_script
namespace: company.team

tasks:
  - id: script
    type: io.kestra.plugin.scripts.python.Script
    taskRunner:
      type: io.kestra.plugin.scripts.runner.docker.Docker
      pullPolicy: IF_NOT_PRESENT
      cpu:
        cpus: 1
      memory: 
        memory: "512Mb"
    containerImage: ghcr.io/kestra-io/kestrapy:latest
    script: |
      from kestra import Kestra
      
      data = dict(message="Hello from Kestra!")
      Kestra.outputs(data)
type const: "io.kestra.plugin.scripts.runner.docker.Docker" required
Constant: "io.kestra.plugin.scripts.runner.docker.Docker"
config string | object

Docker configuration file that can set access credentials to private container registries. Usually located in ~/.docker/config.json.

cpu
All of: io.kestra.plugin.scripts.runner.docker.Cpu object, Limits the CPU usage to a given maximum threshold value.
credentials
All of: Credentials for a private container registry. object, Credentials for a private container registry.
delete boolean

Default value is : true

Default: true
deviceRequests array
entryPoint string[]

Default value is : - ""

Default value is : - ""

Default:
[
  ""
]
extraHosts string[]
fileHandlingStrategy string

How to handle local files (input files, output files, namespace files, ...). By default, we create a volume and copy the file into the volume bind path. Configuring it to MOUNT will mount the working directory instead.

Default value is : VOLUME

Default: "VOLUME"
Values: "MOUNT" "VOLUME"
host string
memory
All of: io.kestra.plugin.scripts.runner.docker.Memory object, Limits memory usage to a given maximum threshold value.
networkMode string
pullPolicy
All of: The image pull policy for a container image and the tag of the image, which affect when Docker attempts to pull (download) the specified image. string, The pull policy for a container image.
shmSize string

The size must be greater than 0. If omitted, the system uses 64MB.

user string
volumes string[]

Make sure to provide a map of a local path to a container path in the format: /home/local/path:/app/container/path. Volume mounts are disabled by default for security reasons — if you are sure you want to use them, enable that feature in the plugin configuration by setting volume-enabled to true.

Here is how you can add that setting to your kestra configuration:

kestra:
  plugins:
    configurations:
      - type: io.kestra.plugin.scripts.runner.docker.Docker
        values:
          volume-enabled: true
io.kestra.plugin.scripts.runner.docker.Memory object
kernelMemory string

The minimum allowed value is 4MB. Because kernel memory cannot be swapped out, a container which is starved of kernel memory may block host machine resources, which can have side effects on the host machine and on other containers. See the kernel-memory docs for more details.

memory string

Make sure to use the format number + unit (regardless of the case) without any spaces. The unit can be KB (kilobytes), MB (megabytes), GB (gigabytes), etc.

Given that it's case-insensitive, the following values are equivalent:

  • "512MB"
  • "512Mb"
  • "512mb"
  • "512000KB"
  • "0.5GB"

It is recommended that you allocate at least 6MB.

memoryReservation string

If you use memoryReservation, it must be set lower than memory for it to take precedence. Because it is a soft limit, it does not guarantee that the container doesn’t exceed the limit.

memorySwap string

If memory and memorySwap are set to the same value, this prevents containers from using any swap. This is because memorySwap includes both the physical memory and swap space, while memory is only the amount of physical memory that can be used.

memorySwappiness string

By default, the host kernel can swap out a percentage of anonymous pages used by a container. You can set memorySwappiness to a value between 0 and 100 to tune this percentage.

oomKillDisable boolean

To change this behavior, use the oomKillDisable option. Only disable the OOM killer on containers where you have also set the memory option. If the memory flag is not set, the host can run out of memory, and the kernel may need to kill the host system’s processes to free the memory.

io.kestra.plugin.scripts.shell.Commands object
Examples

Execute ETL in Rust in a Docker container and output CSV files generated as a result of the script.

id: rust_flow
namespace: company.team

tasks:
  - id: rust
    type: io.kestra.plugin.scripts.shell.Commands
    commands:
      - etl
    containerImage: ghcr.io/kestra-io/rust:latest
    outputFiles:
      - "*.csv"

Execute a single Shell command.

id: shell_single_command
namespace: company.team

tasks:
  - id: command
    type: io.kestra.plugin.scripts.shell.Commands
    commands:
      - 'echo "The current execution is: {{ execution.id }}"'

Include only specific namespace files.

id: include_files
namespace: company.team

tasks:
  - id: command
    type: io.kestra.plugin.scripts.shell.Commands
    description: "Only the included `namespaceFiles` get listed"
    namespaceFiles:
      enabled: true
      include:
        - test1.txt
        - test2.yaml
    commands:
      - ls

Exclude specific namespace files.

id: exclude_files
namespace: company.team

tasks:
  - id: command
    type: io.kestra.plugin.scripts.shell.Commands
    description: "All `namespaceFiles` except those that are excluded will be injected into the task's working directory"
    namespaceFiles:
      enabled: true
      exclude:
        - test1.txt
        - test2.yaml
    commands:
      - ls

Execute Shell commands that generate files accessible by other tasks and available for download in the UI's Output tab.

id: shell_generate_files
namespace: company.team

tasks:
  - id: commands
    type: io.kestra.plugin.scripts.shell.Commands
    outputFiles:
      - first.txt
      - second.txt
    commands:
      - echo "1" >> first.txt
      - echo "2" >> second.txt

Execute a Shell command using an input file generated in a previous task.

id: use_input_file
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/products.csv

  - id: commands
    type: io.kestra.plugin.scripts.shell.Commands
    commands:
      - cat {{ outputs.http_download.uri }}

Run a PHP Docker container and execute a command.

id: run_php_code
namespace: company.team

tasks:
  - id: commands
    type: io.kestra.plugin.scripts.shell.Commands
    taskRunner:
      type: io.kestra.plugin.scripts.runner.docker.Docker
    containerImage: php
    commands:
      - php -r 'print(phpversion());'

Create output variables from a standard output.

id: create_output_variables
namespace: company.team

tasks:
  - id: commands
    type: io.kestra.plugin.scripts.shell.Commands
    commands:
      - echo '::{"outputs":{"test":"value","int":2,"bool":true,"float":3.65}}::'

Send a counter metric from a standard output.

id: create_counter_metric
namespace: company.team

tasks:
  - id: commands
    type: io.kestra.plugin.scripts.shell.Commands
    commands:
      - echo '::{"metrics":[{"name":"count","type":"counter","value":1,"tags":{"tag1":"i","tag2":"win"}}]}::'

commands string[] required
minItems=1
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.scripts.shell.Commands" required
Constant: "io.kestra.plugin.scripts.shell.Commands"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : ubuntu

Default: "ubuntu"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false, all commands will be executed one after the other. The final state of the task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the `set -e` option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.scripts.shell.Script object
Examples

Create an inline Shell script and execute it.

id: shell_script_example
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/orders.csv

  - id: shell_script_task
    type: io.kestra.plugin.scripts.shell.Script
    outputFiles:
      - first.txt
    script: |
      echo "The current execution is : {{ execution.id }}"
      echo "1" >> first.txt
      cat {{ outputs.http_download.uri }}

If you want to generate files in your script to make them available for download and use in downstream tasks, you can leverage the {{ outputDir }} variable. Files stored in that directory will be persisted in Kestra's internal storage. To access this output in downstream tasks, use the syntax {{ outputs.yourTaskId.outputFiles['yourFileName.fileExtension'] }}.

id: shell_script_example
namespace: company.team

tasks:
  - id: hello
    type: io.kestra.plugin.scripts.shell.Script
    taskRunner:
      type: io.kestra.plugin.core.runner.Process
    outputFiles:
      - hello.txt
    script: |
      echo "Hello world!" > hello.txt
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
script string required
minLength=1
type const: "io.kestra.plugin.scripts.shell.Script" required
Constant: "io.kestra.plugin.scripts.shell.Script"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : ubuntu

Default: "ubuntu"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false, all commands will be executed one after the other. The final state of the task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the `set -e` option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.serdes.avro.AvroToIon object
Examples

Convert an Avro file to the Amazon Ion format.

id: avro_to_ion
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/avro/products.avro

  - id: to_ion
    type: io.kestra.plugin.serdes.avro.AvroToIon
    from: "{{ outputs.http_download.uri }}"

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.serdes.avro.AvroToIon" required
Constant: "io.kestra.plugin.serdes.avro.AvroToIon"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.serdes.avro.IonToAvro object
Examples

Convert a CSV file to the Avro format.

id: divvy_tripdata
namespace: company.team

variables:
  file_id: "{{ execution.startDate | dateAdd(-3, 'MONTHS') | date('yyyyMM') }}"

tasks:
  - id: get_zipfile
    type: io.kestra.plugin.core.http.Download
    uri: "https://divvy-tripdata.s3.amazonaws.com/{{ render(vars.file_id) }}-divvy-tripdata.zip"

  - id: unzip
    type: io.kestra.plugin.compress.ArchiveDecompress
    algorithm: ZIP
    from: "{{ outputs.get_zipfile.uri }}"

  - id: convert
    type: io.kestra.plugin.serdes.csv.CsvToIon
    from: "{{ outputs.unzip.files[render(vars.file_id) ~ '-divvy-tripdata.csv'] }}"

  - id: to_avro
    type: io.kestra.plugin.serdes.avro.IonToAvro
    from: "{{ outputs.convert.uri }}"
    datetimeFormat: "yyyy-MM-dd' 'HH:mm:ss"
    schema: |
      {
        "type": "record",
        "name": "Ride",
        "namespace": "com.example.bikeshare",
        "fields": [
          {"name": "ride_id", "type": "string"},
          {"name": "rideable_type", "type": "string"},
          {"name": "started_at", "type": {"type": "long", "logicalType": "timestamp-millis"}},
          {"name": "ended_at", "type": {"type": "long", "logicalType": "timestamp-millis"}},
          {"name": "start_station_name", "type": "string"},
          {"name": "start_station_id", "type": "string"},
          {"name": "end_station_name", "type": "string"},
          {"name": "end_station_id", "type": "string"},
          {"name": "start_lat", "type": "double"},
          {"name": "start_lng", "type": "double"},
          {
            "name": "end_lat",
            "type": ["null", "double"],
            "default": null
          },
          {
            "name": "end_lng",
            "type": ["null", "double"],
            "default": null
          },
          {"name": "member_casual", "type": "string"}
        ]
      }
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
schema string required
type const: "io.kestra.plugin.serdes.avro.IonToAvro" required
Constant: "io.kestra.plugin.serdes.avro.IonToAvro"
allowFailure boolean

Default value is : false

Default: false
dateFormat string

Default value is : "yyyy-MM-dd[XXX]"

Default: "yyyy-MM-dd[XXX]"
datetimeFormat string

Default value is yyyy-MM-dd'T'HH:mm[:ss][.SSSSSS][XXX]

Default value is : "yyyy-MM-dd'T'HH:mm[:ss][.SSSSSS][XXX]"

Default: "yyyy-MM-dd'T'HH:mm[:ss][.SSSSSS][XXX]"
decimalSeparator string

Default value is '.'

Default value is : .

Default: "."
description string
disabled boolean

Default value is : false

Default: false
falseValues string[]

Default value is : `- f

  • "false"
  • disabled
  • 0
  • "off"
  • "no"
  • ""`

Default value is : `- f

  • "false"
  • disabled
  • 0
  • "off"
  • "no"
  • ""`
Default:
[
  "f",
  "false",
  "disabled",
  "0",
  "off",
  "no",
  ""
]
inferAllFields boolean

If true, we try to infer all fields with trueValues, falseValues & nullValues. If false, we will infer bool & null only on fields declared in the schema as null and bool.

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
nullValues string[]

Default value is : `- ""

  • "#N/A"
  • "#N/A N/A"
  • "#NA"
  • -1.#IND
  • -1.#QNAN
  • -NaN
  • 1.#IND
  • 1.#QNAN
  • NA
  • n/a
  • nan
  • "null"`

Default value is : `- ""

  • "#N/A"
  • "#N/A N/A"
  • "#NA"
  • -1.#IND
  • -1.#QNAN
  • -NaN
  • 1.#IND
  • 1.#QNAN
  • NA
  • n/a
  • nan
  • "null"`
Default:
[
  "",
  "#N/A",
  "#N/A N/A",
  "#NA",
  "-1.#IND",
  "-1.#QNAN",
  "-NaN",
  "1.#IND",
  "1.#QNAN",
  "NA",
  "n/a",
  "nan",
  "null"
]
strictSchema boolean

Default value is false

Default value is : false

Default: false
timeFormat string

Default value is : "HH:mm[:ss][.SSSSSS][XXX]"

Default: "HH:mm[:ss][.SSSSSS][XXX]"
timeZoneId string

If null, the timezone will be UTC.

Default value is : Etc/UTC

Default: "Etc/UTC"
timeout string
format=duration
trueValues string[]

Default value is : `- t

  • "true"
  • enabled
  • 1
  • "on"
  • "yes"`

Default value is : `- t

  • "true"
  • enabled
  • 1
  • "on"
  • "yes"`
Default:
[
  "t",
  "true",
  "enabled",
  "1",
  "on",
  "yes"
]
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.serdes.csv.CsvToIon object
Examples

Convert a CSV file to the Amazon Ion format.

id: csv_to_ion
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/products.csv

  - id: to_ion
    type: io.kestra.plugin.serdes.csv.CsvToIon
    from: "{{ outputs.http_download.uri }}"

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.serdes.csv.CsvToIon" required
Constant: "io.kestra.plugin.serdes.csv.CsvToIon"
allowFailure boolean

Default value is : false

Default: false
charset string

Default value is : UTF-8

Default: "UTF-8"
description string
disabled boolean

Default value is : false

Default: false
errorOnDifferentFieldCount boolean

Default value is : false

Default: false
fieldSeparator string

Default value is : ","

Default: ","
header boolean

Default value is : true

Default: true
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
skipEmptyRows boolean

Default value is : false

Default: false
skipRows integer

Default value is : 0

Default: 0
textDelimiter string

Default value is : '"'

Default: "\""
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.serdes.csv.IonToCsv object
Examples

Download a CSV file, transform it in SQL and store the transformed data as a CSV file.

id: ion_to_csv
namespace: company.team

tasks:
  - id: download_csv
    type: io.kestra.plugin.core.http.Download
    description: salaries of data professionals from 2020 to 2023 (source ai-jobs.net)
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/salaries.csv

  - id: avg_salary_by_job_title
    type: io.kestra.plugin.jdbc.duckdb.Query
    inputFiles:
      data.csv: "{{ outputs.download_csv.uri }}"
    sql: |
      SELECT
        job_title,
        ROUND(AVG(salary),2) AS avg_salary
      FROM read_csv_auto('{{ workingDir }}/data.csv', header=True)
      GROUP BY job_title
      HAVING COUNT(job_title) > 10
      ORDER BY avg_salary DESC;
    store: true

  - id: result
    type: io.kestra.plugin.serdes.csv.IonToCsv
    from: "{{ outputs.avg_salary_by_job_title.uri }}"

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.serdes.csv.IonToCsv" required
Constant: "io.kestra.plugin.serdes.csv.IonToCsv"
allowFailure boolean

Default value is : false

Default: false
alwaysDelimitText boolean

Default value is : false

Default: false
charset string

Default value is : UTF-8

Default: "UTF-8"
dateFormat string

Default value is : yyyy-MM-dd

Default: "yyyy-MM-dd"
dateTimeFormat string

Default value is : "yyyy-MM-dd'T'HH:mm:ss.SSS[XXX]"

Default: "yyyy-MM-dd'T'HH:mm:ss.SSS[XXX]"
description string
disabled boolean

Default value is : false

Default: false
fieldSeparator string

Default value is : ","

Default: ","
header boolean

Default value is : true

Default: true
lineDelimiter string

Default value is : "\n" (a newline character)

Default: "\n"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
textDelimiter string

Default value is : '"'

Default: "\""
timeFormat string

Default value is : "HH:mm:ss[XXX]"

Default: "HH:mm:ss[XXX]"
timeZoneId string

Default value is : Etc/UTC

Default: "Etc/UTC"
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.serdes.excel.ExcelToIon object
Examples

Convert an Excel file to the Ion format.

id: excel_to_ion
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/excel/Products.xlsx

  - id: to_ion
    type: io.kestra.plugin.serdes.excel.ExcelToIon
    from: "{{ outputs.http_download.uri }}"

from string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.serdes.excel.ExcelToIon" required
Constant: "io.kestra.plugin.serdes.excel.ExcelToIon"
allowFailure boolean

Default value is : false

Default: false
charset string

Default value is : UTF-8

Default: "UTF-8"
dateTimeRender string

Possible values: SERIAL_NUMBER, FORMATTED_STRING, UNFORMATTED_VALUE

Default value is : UNFORMATTED_VALUE

Default: "UNFORMATTED_VALUE"
Values: "SERIAL_NUMBER" "FORMATTED_STRING" "UNFORMATTED_VALUE"
description string
disabled boolean

Default value is : false

Default: false
header boolean

Default value is : true

Default: true
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
sheetsTitle string[]
skipEmptyRows boolean

Default value is : false

Default: false
skipRows integer

Default value is : 0

Default: 0
min=0
timeout string
format=duration
valueRender string

Possible values: FORMATTED_VALUE, UNFORMATTED_VALUE, FORMULA

Default value is : UNFORMATTED_VALUE

Default: "UNFORMATTED_VALUE"
Values: "FORMATTED_VALUE" "UNFORMATTED_VALUE" "FORMULA"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.serdes.excel.IonToExcel object
Examples

Download a CSV file and convert it to the Excel file format.

id: ion_to_excel
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/products.csv

  - id: convert
    type: io.kestra.plugin.serdes.csv.CsvToIon
    from: "{{ outputs.http_download.uri }}"

  - id: to_excel
    type: io.kestra.plugin.serdes.excel.IonToExcel
    from: "{{ outputs.convert.uri }}"

Download CSV files and convert them into an Excel file with dedicated sheets.

id: excel
namespace: company.team

tasks:
  - id: dataset1
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/products.csv

  - id: dataset2
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/fruit.csv

  - id: convert1
    type: io.kestra.plugin.serdes.csv.CsvToIon
    from: "{{ outputs.dataset1.uri }}"

  - id: convert2
    type: io.kestra.plugin.serdes.csv.CsvToIon
    from: "{{ outputs.dataset2.uri }}"

  - id: write
    type: io.kestra.plugin.serdes.excel.IonToExcel
    from:
      Sheet_1: "{{ outputs.convert1.uri }}"
      Sheet_2: "{{ outputs.convert2.uri }}"

from string | object required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.serdes.excel.IonToExcel" required
Constant: "io.kestra.plugin.serdes.excel.IonToExcel"
allowFailure boolean

Default value is : false

Default: false
charset string

Default value is : UTF-8

Default: "UTF-8"
dateFormat string

Default value is : yyyy-MM-dd

Default: "yyyy-MM-dd"
dateTimeFormat string

Default value is : "yyyy-MM-dd'T'HH:mm:ss.SSS[XXX]"

Default: "yyyy-MM-dd'T'HH:mm:ss.SSS[XXX]"
description string
disabled boolean

Default value is : false

Default: false
header boolean

Default value is : true

Default: true
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
sheetsTitle string

Default value is : Sheet

Default: "Sheet"
styles boolean

Excel is limited to 64,000 styles per document, and styles are applied to every date; disable this option when you have a lot of values.

Default value is : true

Default: true
timeFormat string

Default value is : "HH:mm:ss[XXX]"

Default: "HH:mm:ss[XXX]"
timeZoneId string

Default value is : Etc/UTC

Default: "Etc/UTC"
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.serdes.json.IonToJson object
Examples

Download a CSV file and convert it to a JSON format.

id: ion_to_json
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/products.csv

  - id: convert
    type: io.kestra.plugin.serdes.csv.CsvToIon
    from: "{{ outputs.http_download.uri }}"

  - id: to_json
    type: io.kestra.plugin.serdes.json.IonToJson
    from: "{{ outputs.convert.uri }}"

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.serdes.json.IonToJson" required
Constant: "io.kestra.plugin.serdes.json.IonToJson"
allowFailure boolean

Default value is : false

Default: false
charset string

Default value is UTF-8.

Default value is : UTF-8

Default: "UTF-8"
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
newLine boolean

Whether the file is JSON with a new-line separator. Warning: if not, the whole file will be loaded into memory, which can lead to an out-of-memory error!

Default value is : true

Default: true
timeZoneId string

Default value is : Etc/UTC

Default: "Etc/UTC"
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.serdes.json.JsonToIon object

Please note that we support JSONL format only, i.e. one JSON dictionary/map per line.

Here is how a sample JSON file content might look like:

{"product_id":"1","product_name":"streamline turn-key systems","product_category":"Electronics","brand":"gomez"},
{"product_id":"2","product_name":"morph viral applications","product_category":"Household","brand":"wolfe"},
{"product_id":"3","product_name":"expedite front-end schemas","product_category":"Household","brand":"davis-martinez"}

We do NOT support an array of JSON objects. A JSON file in the following array format is not supported:

[
    {"product_id":"1","product_name":"streamline turn-key systems","product_category":"Electronics","brand":"gomez"},
    {"product_id":"2","product_name":"morph viral applications","product_category":"Household","brand":"wolfe"},
    {"product_id":"3","product_name":"expedite front-end schemas","product_category":"Household","brand":"davis-martinez"}
]
Examples

Convert a JSON file to the Amazon Ion format.

id: json_to_ion
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/json/products.json

  - id: to_ion
    type: io.kestra.plugin.serdes.json.JsonToIon
    from: "{{ outputs.http_download.uri }}"

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.serdes.json.JsonToIon" required
Constant: "io.kestra.plugin.serdes.json.JsonToIon"
allowFailure boolean

Default value is : false

Default: false
charset string

Default value is UTF-8.

Default value is : UTF-8

Default: "UTF-8"
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
newLine boolean

Whether the file is JSON with a new-line separator. Warning: if not, the whole file will be loaded into memory, which can lead to an out-of-memory error!

Default value is : true

Default: true
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.serdes.parquet.IonToParquet object
Examples

Read a CSV file, transform it and store the transformed data as a parquet file.

id: ion_to_parquet
namespace: company.team

tasks:
  - id: download_csv
    type: io.kestra.plugin.core.http.Download
    description: salaries of data professionals from 2020 to 2023 (source ai-jobs.net)
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/salaries.csv

  - id: avg_salary_by_job_title
    type: io.kestra.plugin.jdbc.duckdb.Query
    inputFiles:
      data.csv: "{{ outputs.download_csv.uri }}"
    sql: |
      SELECT
        job_title,
        ROUND(AVG(salary),2) AS avg_salary
      FROM read_csv_auto('{{ workingDir }}/data.csv', header=True)
      GROUP BY job_title
      HAVING COUNT(job_title) > 10
      ORDER BY avg_salary DESC;
    store: true

  - id: result
    type: io.kestra.plugin.serdes.parquet.IonToParquet
    from: "{{ outputs.avg_salary_by_job_title.uri }}"
    schema: |
      {
        "type": "record",
        "name": "Salary",
        "namespace": "com.example.salary",
        "fields": [
          {"name": "job_title", "type": "string"},
          {"name": "avg_salary", "type": "double"}
        ]
      }

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
schema string required
type const: "io.kestra.plugin.serdes.parquet.IonToParquet" required
Constant: "io.kestra.plugin.serdes.parquet.IonToParquet"
allowFailure boolean

Default value is : false

Default: false
compressionCodec string

Default value is : GZIP

Default: "GZIP"
Values: "UNCOMPRESSED" "SNAPPY" "GZIP" "ZSTD"
dateFormat string

Default value is : "yyyy-MM-dd[XXX]"

Default: "yyyy-MM-dd[XXX]"
datetimeFormat string

Default value is yyyy-MM-dd'T'HH:mm[:ss][.SSSSSS][XXX]

Default value is : "yyyy-MM-dd'T'HH:mm[:ss][.SSSSSS][XXX]"

Default: "yyyy-MM-dd'T'HH:mm[:ss][.SSSSSS][XXX]"
decimalSeparator string

Default value is '.'

Default value is : .

Default: "."
description string
dictionaryPageSize integer

Default value is : 1048576

Default: 1048576
disabled boolean

Default value is : false

Default: false
falseValues string[]

Default value is : `- f

  • "false"
  • disabled
  • 0
  • "off"
  • "no"
  • ""`

Default value is : `- f

  • "false"
  • disabled
  • 0
  • "off"
  • "no"
  • ""`
Default:
[
  "f",
  "false",
  "disabled",
  "0",
  "off",
  "no",
  ""
]
inferAllFields boolean

If true, we try to infer all fields with trueValues, falseValues & nullValues. If false, we will infer bool & null only on fields declared in the schema as null and bool.

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
nullValues string[]

Default value is : `- ""

  • "#N/A"
  • "#N/A N/A"
  • "#NA"
  • -1.#IND
  • -1.#QNAN
  • -NaN
  • 1.#IND
  • 1.#QNAN
  • NA
  • n/a
  • nan
  • "null"`

Default value is : `- ""

  • "#N/A"
  • "#N/A N/A"
  • "#NA"
  • -1.#IND
  • -1.#QNAN
  • -NaN
  • 1.#IND
  • 1.#QNAN
  • NA
  • n/a
  • nan
  • "null"`
Default:
[
  "",
  "#N/A",
  "#N/A N/A",
  "#NA",
  "-1.#IND",
  "-1.#QNAN",
  "-NaN",
  "1.#IND",
  "1.#QNAN",
  "NA",
  "n/a",
  "nan",
  "null"
]
pageSize integer

Default value is : 1048576

Default: 1048576
rowGroupSize integer

Default value is : 134217728

Default: 134217728
strictSchema boolean

Default value is false

Default value is : false

Default: false
timeFormat string

Default value is : "HH:mm[:ss][.SSSSSS][XXX]"

Default: "HH:mm[:ss][.SSSSSS][XXX]"
timeZoneId string

If null, the timezone will be UTC.

Default value is : Etc/UTC

Default: "Etc/UTC"
timeout string
format=duration
trueValues string[]

Default value is : `- t

  • "true"
  • enabled
  • 1
  • "on"
  • "yes"`

Default value is : `- t

  • "true"
  • enabled
  • 1
  • "on"
  • "yes"`
Default:
[
  "t",
  "true",
  "enabled",
  "1",
  "on",
  "yes"
]
version string

Default value is : V2

Default: "V2"
Values: "V1" "V2"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.serdes.parquet.ParquetToIon object
Examples

Convert a parquet file to the Amazon Ion format.

id: parquet_to_ion
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/parquet/products.parquet

  - id: to_ion
    type: io.kestra.plugin.serdes.parquet.ParquetToIon
    from: "{{ outputs.http_download.uri }}"

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.serdes.parquet.ParquetToIon" required
Constant: "io.kestra.plugin.serdes.parquet.ParquetToIon"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.serdes.xml.IonToXml object
Examples

Read a CSV file, transform it and store the transformed data as an XML file.

id: ion_to_xml
namespace: company.team

tasks:
  - id: download_csv
    type: io.kestra.plugin.core.http.Download
    description: salaries of data professionals from 2020 to 2023 (source ai-jobs.net)
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/salaries.csv

  - id: avg_salary_by_job_title
    type: io.kestra.plugin.jdbc.duckdb.Query
    inputFiles:
      data.csv: "{{ outputs.download_csv.uri }}"
    sql: |
      SELECT
        job_title,
        ROUND(AVG(salary),2) AS avg_salary
      FROM read_csv_auto('{{ workingDir }}/data.csv', header=True)
      GROUP BY job_title
      HAVING COUNT(job_title) > 10
      ORDER BY avg_salary DESC;
    store: true

  - id: result
    type: io.kestra.plugin.serdes.xml.IonToXml
    from: "{{ outputs.avg_salary_by_job_title.uri }}"

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.serdes.xml.IonToXml" required
Constant: "io.kestra.plugin.serdes.xml.IonToXml"
allowFailure boolean

Default value is : false

Default: false
charset string

Default value is UTF-8.

Default value is : UTF-8

Default: "UTF-8"
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
rootName string

Default value is : items

Default: "items"
timeZoneId string

Default value is : Etc/UTC

Default: "Etc/UTC"
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.serdes.xml.XmlToIon object
Examples

Convert an XML file to the Amazon Ion format.

id: xml_to_ion
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/xml/products.xml

  - id: to_ion
    type: io.kestra.plugin.serdes.xml.XmlToIon
    from: "{{ outputs.http_download.uri }}"

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.serdes.xml.XmlToIon" required
Constant: "io.kestra.plugin.serdes.xml.XmlToIon"
allowFailure boolean

Default value is : false

Default: false
charset string

Default value is UTF-8.

Default value is : UTF-8

Default: "UTF-8"
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
parserConfiguration
All of: XML parser configuration. object, XML parser configuration.
query string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.serdes.xml.XmlToIon-ParserConfiguration object
forceList string[]
io.kestra.plugin.servicenow.Post object
Examples

Create an incident.

id: servicenow_post
namespace: company.team

tasks:
  - id: post
    type: io.kestra.plugin.servicenow.Post
    domain: "snow_domain"
    username: "snow_username"
    password: "snow_password"
    clientId: "snow_client_id"
    clientSecret: "snow_client_secret"
    table: incident
    data:
      short_description: "API Create Incident..."
      requester_id: f8266e2adb16fb00fa638a3a489619d2
      requester_for_id: a7ec77cbdefac300d322d182689619dc
      product_id: 01a2e3c1db15f340d329d18c689ed922

clientId string required
minLength=1
clientSecret string required
minLength=1
data object required
domain string required

Will be used to generate the url: https://[[DOMAIN]].service-now.com/

minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
password string required
minLength=1
table string required
minLength=1
type const: "io.kestra.plugin.servicenow.Post" required
Constant: "io.kestra.plugin.servicenow.Post"
username string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
headers object
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.models.StreamsConfiguration object
propertiesPattern string[]
replicationKeys string
replicationMethod string
Values: "FULL_TABLE" "INCREMENTAL" "LOG_BASED"
selected boolean

Default value is : true

Default: true
stream string
io.kestra.plugin.singer.taps.BigQuery object

Full documentation can be found here

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
limit integer required
startDateTime string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date-time
streams array required
minItems=1
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.BigQuery" required
Constant: "io.kestra.plugin.singer.taps.BigQuery"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
endDateTime string
format=date-time
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
serviceAccount string
startAlwaysInclusive boolean

This could cause records to be missed that were created after the last run finished, but during the same second and with the same timestamp.

Default value is : true

Default: true
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.BigQuery-Stream object
columns string[]
datetime_key string
filters string[]

filters are optional but we strongly recommend using this over a large partitioned table to control the cost.

name string
table string
io.kestra.plugin.singer.taps.BingAds object

Full documentation can be found here

accountIds string[] required
minItems=1
customerId string required
minLength=1
developerToken string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
oauthClientId string required
minLength=1
oauthClientSecret string required
minLength=1
refreshToken string required
minLength=1
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.BingAds" required
Constant: "io.kestra.plugin.singer.taps.BingAds"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.ChargeBee object

Full documentation can be found here

apiKey string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
site string required

mostly in the form {site}.chargebee.com

minLength=1
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.ChargeBee" required
Constant: "io.kestra.plugin.singer.taps.ChargeBee"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
productCatalog string

Default value is : 1.0

Default: "1.0"
minLength=1
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.ExchangeRateHost object

Full documentation can be found here

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.ExchangeRateHost" required
Constant: "io.kestra.plugin.singer.taps.ExchangeRateHost"
allowFailure boolean

Default value is : false

Default: false
base string

Default value is : EUR

Default: "EUR"
minLength=1
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
endDate string
format=date
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.FacebookAds object

Full documentation can be found here

accessToken string required
minLength=1
accountId string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.FacebookAds" required
Constant: "io.kestra.plugin.singer.taps.FacebookAds"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
endDate string
format=date
insightsBufferDays integer

Default value is : 0

Default: 0
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.Fastly object

Full documentation can be found here

apiToken string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.Fastly" required
Constant: "io.kestra.plugin.singer.taps.Fastly"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.GenericTap object
command string required
configs object required

Will be saved in config.json and used as arguments

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
pipPackages string[] required
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.GenericTap" required
Constant: "io.kestra.plugin.singer.taps.GenericTap"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
features string[]

Default value is:
  - PROPERTIES
  - DISCOVER
  - STATE
Default:
[
  "PROPERTIES",
  "DISCOVER",
  "STATE"
]
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.GitHub object

Full documentation can be found here

accessToken string required

Login to your GitHub account, go to the Personal Access Tokens settings page, and generate a new token with at least the repo scope.

minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
repositories required

The repo path is relative to https://github.com/. For example the path for this repository is kestra-io/kestra.

startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.GitHub" required
Constant: "io.kestra.plugin.singer.taps.GitHub"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
requestTimeout integer

Default value is : 300

Default: 300
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.Gitlab object

Full documentation can be found here

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
private_token string required
minLength=1
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.Gitlab" required
Constant: "io.kestra.plugin.singer.taps.Gitlab"
allowFailure boolean

Default value is : false

Default: false
apiUrl string

When an API path is omitted, /api/v4/ is assumed.

Default value is : https://gitlab.com

Default: "https://gitlab.com"
minLength=1
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
fetchMergeRequestCommits boolean

This can slow down extraction considerably because of the many API calls required.

Default value is : false

Default: false
fetchPipelinesExtended boolean

This can slow down extraction considerably because of the many API calls required.

Default value is : false

Default: false
groups string[]

Leave empty and provide a project name if you'd like to pull data from a project in a personal user namespace.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
projects string[]

Leave empty and provide a group name to extract data from all group projects.

stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
ultimateLicense boolean

Default value is : false

Default: false
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.GoogleAdwords object

Full documentation can be found here

customerIds string[] required
minItems=1
developerToken string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
oauthClientId string required
minLength=1
oauthClientSecret string required
minLength=1
refreshToken string required
minLength=1
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.GoogleAdwords" required
Constant: "io.kestra.plugin.singer.taps.GoogleAdwords"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
conversionWindowDays integer

Default value is : 0

Default: 0
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
endDate string
format=date
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
primaryKeys object
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
userAgent string

Default value is : tap-adwords via Kestra

Default: "tap-adwords via Kestra"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.GoogleAnalytics object

Full documentation can be found here

id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.GoogleAnalytics" required
Constant: "io.kestra.plugin.singer.taps.GoogleAnalytics"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
endDate string
format=date
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
oauthAccessToken string
oauthClientId string
oauthClientSecret string
oauthRefreshToken string
pipPackages string[]
reports array
serviceAccount string
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
view_id string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.GoogleAnalytics-Report object
dimensions string[]
metrics string[]
name string
io.kestra.plugin.singer.taps.GoogleSearchConsole object

Full documentation can be found here

clientId string required
minLength=1
clientSecret string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
refreshToken string required
minLength=1
siteUrls string[] required

Do not include the domain-level property in the list.

minItems=1
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.GoogleSearchConsole" required
Constant: "io.kestra.plugin.singer.taps.GoogleSearchConsole"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
userAgent string

Default value is : tap-google-search-console via Kestra

Default: "tap-google-search-console via Kestra"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.HubSpot object

Full documentation can be found here

accessToken string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.HubSpot" required
Constant: "io.kestra.plugin.singer.taps.HubSpot"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
flatteningEnabled boolean

Default value is : false

Default: false
flatteningMaxDepth integer
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
streamMapConfig object
streamMaps object

For more information check out Stream Maps.

taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.Marketo object

Full documentation can be found here

clientId string required
minLength=1
clientSecret string required
minLength=1
endpoint string required

The base URL contains the account id (a.k.a. Munchkin id) and is therefore unique for each Marketo subscription. Your base URL is found by logging into Marketo and navigating to the Admin > Integration > Web Services menu. It is labeled as “Endpoint:” underneath the “REST API” section as shown in the following screenshots.

minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
identity string required

Identity is found directly below the endpoint entry. See https://developers.marketo.com/rest-api/base-url/

minLength=1
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.Marketo" required
Constant: "io.kestra.plugin.singer.taps.Marketo"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.Netsuite object

Full documentation can be found here

accountId string required

This can be found under Setup -> Company -> Company Information. Look for Account Id. Note _SB is for Sandbox account.

minLength=1
consumerKey string required

Visit this page for details.

minLength=1
consumerSecret string required

Visit this page for details.

minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
isSandbox boolean required

This should always be set to true if you are connecting to a Production account of NetSuite. Set it to false if you want to connect to a Sandbox account.

selectFieldsByDefault boolean required

When new fields are discovered in NetSuite objects, the select_fields_by_default key describes whether or not the tap will select those fields by default.

startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
tokenKey string required

Visit this page for details.

minLength=1
tokenSecret string required

Visit this page for details.

minLength=1
type const: "io.kestra.plugin.singer.taps.Netsuite" required
Constant: "io.kestra.plugin.singer.taps.Netsuite"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.PipelinewiseMongoDb object

Full documentation can be found here

authDatabase string required
database string required
host string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port integer required
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.PipelinewiseMongoDb" required
Constant: "io.kestra.plugin.singer.taps.PipelinewiseMongoDb"
username string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
awaitTimeMs integer

For LOG_BASED only.

Default value is : 1000

Default: 1000
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
includeSchemaInStream boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
pipPackages string[]
replicaSet string
ssl boolean

Default value is : false

Default: false
sslVerify boolean

Default value is : true

Default: true
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
updateBufferSize integer

For LOG_BASED only, the buffer is flushed once the size is reached.

Default value is : 1

Default: 1
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.PipelinewiseMysql object

Full documentation can be found here

##### Examples

host: 127.0.0.1
username: root
password: mysql_passwd
port: 63306
streamsConfigurations:
  - stream: Category
    replicationMethod: INCREMENTAL
    replicationKeys: categoryId
    selected: true
  - propertiesPattern:
      - description
    selected: false
host string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port integer required
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.PipelinewiseMysql" required
Constant: "io.kestra.plugin.singer.taps.PipelinewiseMysql"
username string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
exportBatchRows integer

Default value is : 50000

Default: 50000
filterDbs string[]
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
pipPackages string[]
sessionSqls string[]

Default value is:
  - SET @@session.time_zone="+0:00"
  - SET @@session.wait_timeout=28800
  - SET @@session.net_read_timeout=3600
  - SET @@session.innodb_lock_wait_timeout=3600
Default:
[
  "SET @@session.time_zone=\"+0:00\"",
  "SET @@session.wait_timeout=28800",
  "SET @@session.net_read_timeout=3600",
  "SET @@session.innodb_lock_wait_timeout=3600"
]
ssl boolean

Default value is : false

Default: false
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.PipelinewiseOracle object

Full documentation can be found here

##### Examples

host: 127.0.0.1
username: oracle
password: oracle_passwd
port: 63306
sid: ORCL
streamsConfigurations:
  - stream: Category
    replicationMethod: INCREMENTAL
    replicationKeys: categoryId
    selected: true
  - propertiesPattern:
      - description
    selected: false
host string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port integer required
sid string required
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.PipelinewiseOracle" required
Constant: "io.kestra.plugin.singer.taps.PipelinewiseOracle"
username string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
filterSchemas string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.PipelinewisePostgres object

Full documentation can be found here

host string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port integer required
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.PipelinewisePostgres" required
Constant: "io.kestra.plugin.singer.taps.PipelinewisePostgres"
username string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
breakAtEndLsn boolean

Default value is : true

Default: true
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
dbName string
debugLsn boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
filterSchemas string[]
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
logicalPollSeconds integer

Default value is : 10800

Default: 10800
maxRunSeconds integer

Default value is : 43200

Default: 43200
password string
pipPackages string[]
ssl boolean

Default value is : false

Default: false
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.PipelinewiseSqlServer object

Full documentation can be found here

##### Examples

host: 127.0.0.1
username: SA
password: sqlserver_passwd
port: 57037
filterDbs: dbo
streamsConfigurations:
  - stream: Categories
    replicationMethod: INCREMENTAL
    replicationKeys: CategoryID
    selected: true
  - propertiesPattern:
      - Description
    selected: false
database string required
minLength=1
host string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
password string required
minLength=1
port integer required
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.PipelinewiseSqlServer" required
Constant: "io.kestra.plugin.singer.taps.PipelinewiseSqlServer"
username string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
characterSet string
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
cursorArraySize integer

The common query tuning scenario is for SELECT statements that return a large number of rows over a slow network. Increasing arraysize can improve performance by reducing the number of round-trips to the database. However, increasing this value increases the amount of memory required.

description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
filterDbs string[]
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
tdsVersion string
timeout string
format=duration
useDateDatatype boolean

Default value is : true

Default: true
useSingerDecimal boolean

When true, the resulting SCHEMA message will contain an attribute in additionalProperties containing the scale and precision of the discovered property.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.Quickbooks object

Full documentation can be found here

clientId string required
minLength=1
clientSecret string required
minLength=1
id string required
minLength=1
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
realmId string required
minLength=1
refreshToken string required
minLength=1
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.Quickbooks" required
Constant: "io.kestra.plugin.singer.taps.Quickbooks"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
isSandbox boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxWorkers integer

Default value is : 8

Default: 8
pipPackages string[]
selectFieldsByDefault boolean

Default value is : true

Default: true
stateMessageThreshold integer

Default value is : 1000

Default: 1000
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.Recharge object

Full documentation can be found here

accessToken string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.Recharge" required
Constant: "io.kestra.plugin.singer.taps.Recharge"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
userAgent string

User agent to send to ReCharge along with API requests. Typically includes name of integration and an email address you can be reached at.

workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.SageIntacct object

Full documentation can be found here

companyId string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
senderId string required
minLength=1
senderPassword string required
minLength=1
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.SageIntacct" required
Constant: "io.kestra.plugin.singer.taps.SageIntacct"
userId string required
minLength=1
userPassword string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.Salesforce object

Full documentation can be found here

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.Salesforce" required
Constant: "io.kestra.plugin.singer.taps.Salesforce"
allowFailure boolean

Default value is : false

Default: false
apiType string

Default value is : BULK

Default: "BULK"
Values: "REST" "BULK"
clientId string
clientSecret string
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
isSandbox boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxWorkers integer

Default value is : 8

Default: 8
password string
pipPackages string[]
refreshToken string
securityToken string
selectFieldsByDefault boolean

Default value is : true

Default: true
stateMessageThreshold integer

Default value is : 1000

Default: 1000
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.Shopify object

Full documentation can be found here

apiKey string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
shop string required

Ex. my-first-store

minLength=1
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.Shopify" required
Constant: "io.kestra.plugin.singer.taps.Shopify"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.Slack object

Full documentation can be found here

apiToken string required

More details on Slack Access Tokens can be found here.

minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.Slack" required
Constant: "io.kestra.plugin.singer.taps.Slack"
allowFailure boolean

Default value is : false

Default: false
archivedChannels boolean

Specifies whether the tap will sync archived channels or not. Note that a bot cannot join an archived channel, so unless the bot was added to the channel prior to it being archived it will not be able to sync the data from that channel.

Default value is : false

Default: false
channels string[]

By default the tap will sync all channels it has been invited to, but this can be overridden to limit it to specific channels. Note this needs to be channel ID, not the name, as recommended by the Slack API. To get the ID for a channel, either use the Slack API or find it in the URL.

command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
dateWindowSize integer

Due to the potentially high volume of data when syncing certain streams (messages, files, threads) this tap implements date windowing based on a configuration parameter. A value of 5 means the tap will sync 5 days of data per request, for applicable streams.

Default value is : 7

Default: 7
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
privateChannels boolean

Specifies whether to sync private channels or not.

Default value is : true

Default: true
publicChannels boolean

Specifies whether to have the tap auto-join all public channels in your organization.

Default value is : false

Default: false
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.Stripe object

Full documentation can be found here

accountId string required

Ex. acct_1a2b3c4d5e

minLength=1
clientSecret string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.Stripe" required
Constant: "io.kestra.plugin.singer.taps.Stripe"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.Zendesk object

Full documentation can be found here

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
startDate string required

Please be aware that the larger the time period and amount of data, the longer the initial extraction can be expected to take.

format=date
streamsConfigurations array required
minItems=1
subdomain string required
minLength=1
type const: "io.kestra.plugin.singer.taps.Zendesk" required
Constant: "io.kestra.plugin.singer.taps.Zendesk"
accessToken string
allowFailure boolean

Default value is : false

Default: false
apiToken string
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
email string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.taps.Zoom object

Full documentation can be found here

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
streamsConfigurations array required
minItems=1
type const: "io.kestra.plugin.singer.taps.Zoom" required
Constant: "io.kestra.plugin.singer.taps.Zoom"
allowFailure boolean

Default value is : false

Default: false
clientId string
clientSecret string
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
jwt string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
refreshToken string
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.targets.AdswerveBigQuery object

Full documentation can be found here

datasetId string required
minLength=1
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
projectId string required
minLength=1
type const: "io.kestra.plugin.singer.targets.AdswerveBigQuery" required
Constant: "io.kestra.plugin.singer.targets.AdswerveBigQuery"
addMetadataColumns boolean

Add _time_extracted and _time_loaded metadata columns.

Default value is : false

Default: false
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
location string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxCache integer

Default value is : 50

Default: 50
mergeStateMessages boolean

By default, merges multiple state messages from the tap into the state file; if true, uses the last state message as the state file.

Default value is : false

Default: false
pipPackages string[]
replicationMethod string

Default value is : append

Default: "append"
Values: "append" "truncate"
serviceAccount string
stateName string

Default value is : singer-state

Default: "singer-state"
tableConfigs object
tablePrefix string
tableSuffix string
taskRunner
timeout string
format=duration
validateRecords boolean

This option is disabled by default and invalid RECORD messages will fail only at load time by BigQuery. Enabling this option will detect invalid records earlier but could cause performance degradation.

Default value is : false

Default: false
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.targets.Csv object

Full documentation can be found here

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.singer.targets.Csv" required
Constant: "io.kestra.plugin.singer.targets.Csv"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
delimiter string

Default value is : ","

Default: ","
minLength=1
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
quoteCharacters string

Default value is : '"'

Default: """
minLength=1
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.targets.DatamillCoPostgres object

Full documentation can be found here

from string required
host string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port integer required
type const: "io.kestra.plugin.singer.targets.DatamillCoPostgres" required
Constant: "io.kestra.plugin.singer.targets.DatamillCoPostgres"
username string required
minLength=1
addUpsertIndexes boolean

These indexes will make data loading slightly slower but the deduplication phase much faster. Defaults to on for better baseline performance.

Default value is : true

Default: true
afterRunSql string
allowFailure boolean

Default value is : false

Default: false
batchDetectionThreshold integer

There's a slight performance penalty to checking the buffered records count or bytesize, so this controls how often this is polled in order to mitigate the penalty. This value is usually not necessary to set as the default is dynamically adjusted to check reasonably often.

Default is 5000, or 1/40th maxBatchRows

beforeRunSql string

Useful for setup like SET ROLE or other connection state that is important.

command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
dbName string
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
invalidRecordsDetect boolean

Default value is : true

Default: true
invalidRecordsThreshold integer

Default value is : 0

Default: 0
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
loggingLevel string

Set to DEBUG to get things like queries executed, timing of those queries, etc. See Python's Logger Levels for information about valid values.

Default value is : INFO

Default: "INFO"
maxBatchRows integer

Default value is : 200000

Default: 200000
maxBufferSize integer

Default value is : 104857600

Default: 104857600
password string
persistEmptyTables boolean

Default value is : false

Default: false
pipPackages string[]
schema string

Default value is : public

Default: "public"
sslMode string

Default value is : prefer

Default: "prefer"
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.targets.GenericTarget object
command string required
configs object required

Will be save on config.json and used as arguments

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
pipPackages string[] required
type const: "io.kestra.plugin.singer.targets.GenericTarget" required
Constant: "io.kestra.plugin.singer.targets.GenericTarget"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.targets.Json object

Full documentation can be found here

from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.singer.targets.Json" required
Constant: "io.kestra.plugin.singer.targets.Json"
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.targets.MeltanoSnowflake object

Full documentation can be found here

account string required

(i.e. rtXXXXX.eu-central-1)

minLength=1
database string required
minLength=1
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
schema string required
minLength=1
type const: "io.kestra.plugin.singer.targets.MeltanoSnowflake" required
Constant: "io.kestra.plugin.singer.targets.MeltanoSnowflake"
username string required
minLength=1
warehouse string required
minLength=1
addRecordMetadata boolean

Default value is : true

Default: true
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
defaultTargetSchema string
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
flatteningEnabled boolean
flatteningMaxDepth integer
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
password string
pipPackages string[]
role string
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.targets.Oracle object

Full documentation can be found here

from string required
host string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
password string required
minLength=1
port integer required
type const: "io.kestra.plugin.singer.targets.Oracle" required
Constant: "io.kestra.plugin.singer.targets.Oracle"
username string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
database string
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
driverName string
flatteningEnabled boolean
flatteningMaxDepth integer
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
preferFloatOverNumeric boolean
stateName string

Default value is : singer-state

Default: "singer-state"
streamMapConfig string
streamMaps string
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.targets.PipelinewisePostgres object

Full documentation can be found here

from string required
host string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port integer required
type const: "io.kestra.plugin.singer.targets.PipelinewisePostgres" required
Constant: "io.kestra.plugin.singer.targets.PipelinewisePostgres"
username string required
minLength=1
addMetadataColumns boolean

Metadata columns add extra row level information about data ingestions, (i.e. when was the row read in source, when was inserted or deleted in postgres etc.) Metadata columns are created automatically by adding extra columns to the tables with a column prefix _SDC_. The column names are following the stitch naming conventions. Enabling metadata columns will flag the deleted rows by setting the _SDC_DELETED_AT metadata column. Without the add_metadata_columns option the deleted rows from singer taps will not be recognisable in Postgres.

Default value is : false

Default: false
allowFailure boolean

Default value is : false

Default: false
batchSizeRows integer

At the end of each batch, the rows in the batch are loaded into Postgres.

Default value is : 100000

Default: 100000
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
dataFlatteningMaxLevel integer

When value is 0 (default) then flattening functionality is turned off.

Default value is : 0

Default: 0
dbName string
defaultTargetSchema string

If schemaMapping is not defined then every stream sent by the tap is loaded into this schema.

defaultTargetSchemaSelectPermission string
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
flushAllStreams boolean

Warning: This may trigger the COPY command to use files with low number of records..

Default value is : false

Default: false
hardDelete boolean

When hard_delete option is true then DELETE SQL commands will be performed in Postgres to delete rows in tables. It's achieved by continuously checking the _SDC_DELETED_AT metadata column sent by the singer tap. Due to deleting rows requires metadata columns, hard_delete option automatically enables the add_metadata_columns option as well.

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxParallelism integer

Default value is : 16

Default: 16
parallelism integer

0 will create a thread for each stream, up to parallelism_max. -1 will create a thread for each CPU core. Any other positive number will create that number of threads, up to parallelism_max.

Default value is : 0

Default: 0
password string
pipPackages string[]
primaryKeyRequired boolean

When set to true, stop loading data if no Primary Key is defined.

Default value is : true

Default: true
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
validateRecords boolean

This option is disabled by default and invalid RECORD messages will fail only at load time by Postgres. Enabling this option will detect invalid records earlier but could cause performance degradation.

Default value is : false

Default: false
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.targets.PipelinewiseRedshift object

Full documentation can be found here

defaultTargetSchema string required

If schema_mapping is not defined then every stream sent by the tap is loaded into this schema.

minLength=1
from string required
host string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
port integer required
s3Bucket string required
minLength=1
type const: "io.kestra.plugin.singer.targets.PipelinewiseRedshift" required
Constant: "io.kestra.plugin.singer.targets.PipelinewiseRedshift"
username string required
minLength=1
accessKeyId string

Used for S3 and Redshift copy operations.

addMetadataColumns boolean

Metadata columns add extra row level information about data ingestions, (i.e. when was the row read in source, when was inserted or deleted in redshift etc.) Metadata columns are created automatically by adding extra columns to the tables with a column prefix SDC. The metadata columns are documented at here. Enabling metadata columns will flag the deleted rows by setting the _SDC_DELETED_AT metadata column. Without the addMetadataColumns option the deleted rows from singer taps will not be recognisable in Redshift.

Default value is : false

Default: false
allowFailure boolean

Default value is : false

Default: false
batchSizeRows integer

At the end of each batch, the rows in the batch are loaded into Redshift.

Default value is : 100000

Default: 100000
command string
compression string

Default value is : bzip2

Default: "bzip2"
Values: "gzip" "bzip2"
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
copyOptions string

Parameters to use in the COPY command when loading data to Redshift. Some basic file formatting parameters are fixed values and not recommended overriding them by custom values. They are like: CSV GZIP DELIMITER ',' REMOVEQUOTES ESCAPE.

dataFlatteningMaxLevel integer

When value is 0 (default) then flattening functionality is turned off.

Default value is : 0

Default: 0
dbName string
defaultTargetSchemaSelectPermissions string

If schemaMapping is not defined then every stream sent by the tap is granted accordingly.

description string
disableTableCache boolean

By default the connector caches the available table structures in Redshift at startup. In this way it doesn't need to run additional queries when ingesting data to check if altering the target tables is required. With disable_table_cache option you can turn off this caching. You will always see the most recent table structures but will cause an extra query runtime.

Default value is : false

Default: false
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
flushAllStreams boolean

Warning: This may trigger the COPY command to use files with low number of records..

Default value is : false

Default: false
hardDelete boolean

When hardDelete option is true then DELETE SQL commands will be performed in Redshift to delete rows in tables. It's achieved by continuously checking the _SDC_DELETED_AT metadata column sent by the singer tap. Due to deleting rows requires metadata columns, hardDelete option automatically enables the addMetadataColumns option as well.

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxParallelism integer

Default value is : 16

Default: 16
parallelism integer

0 will create a thread for each stream, up to parallelism_max. -1 will create a thread for each CPU core. Any other positive number will create that number of threads, up to parallelism_max.

Default value is : 0

Default: 0
password string
pipPackages string[]
primaryKeyRequired boolean

When set to true, stop loading data if no Primary Key is defined.

Default value is : true

Default: true
redshiftCopyRoleArn string

AWS Role ARN to be used for the Redshift COPY operation. Used instead of the given AWS keys for the COPY operation if provided - the keys are still used for other S3 operations.

s3Acl string

S3 Object ACL.

s3KeyPrefix string

A static prefix before the generated S3 key names. Using prefixes you can upload files into specific directories in the S3 bucket. Default(None).

schema_mapping string

Useful if you want to load multiple streams from one tap to multiple Redshift schemas. If the tap sends the stream_id in <schema_name>-<table_name> format then this option overwrites the default_target_schema value. Note, that using schema_mapping you can overwrite the default_target_schema_select_permissions value to grant SELECT permissions to different groups per schemas or optionally you can create indices automatically for the replicated tables.

secretAccessKey string

Used for S3 and Redshift copy operations.

sessionToken string

S3 AWS STS token for temporary credentials.

skipUpdates boolean

Useful to improve performance when records are immutable, e.g. events.

Default value is : false

Default: false
slices integer

This should be set to the number of Redshift slices. The number of slices per node depends on the node size of the cluster - run SELECT COUNT(DISTINCT slice) slices FROM stv_slices to calculate this.

Default value is : 1

Default: 1
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
validateRecords boolean

This option is disabled by default and invalid RECORD messages will fail only at load time by Redshift. Enabling this option will detect invalid records earlier but could cause performance degradation.

Default value is : false

Default: false
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.targets.PipelinewiseSnowflake object

Full documentation can be found here

account string required

(i.e. rtXXXXX.eu-central-1)

minLength=1
database string required
minLength=1
from string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.singer.targets.PipelinewiseSnowflake" required
Constant: "io.kestra.plugin.singer.targets.PipelinewiseSnowflake"
username string required
minLength=1
warehouse string required
minLength=1
addMetadataColumns boolean

Default value is : false

Default: false
allowFailure boolean

Default value is : false

Default: false
archiveLoadFiles boolean

Default value is : false

Default: false
archiveLoadFilesS3Bucket string
archiveLoadFilesS3Prefix string
awsAccessKeyId string
awsProfile string
awsSecretAccessKey string
awsSessionToken string
batchSizeRows integer

Default value is : 100000

Default: 100000
batchWaitLimit string
format=duration
clientSideEncryptionMasterKey string
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
dataFlatteningMaxLevel integer

Default value is : 0

Default: 0
defaultTargetSchema string
defaultTargetSchemaSelectPermission string
description string
disableTableCache boolean

Default value is : false

Default: false
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
fileFormat string
flushAllStreams boolean

Default value is : false

Default: false
hardDelete boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
noCompression boolean

Default value is : false

Default: false
parallelism integer

Default value is : 0

Default: 0
parallelismMax integer

Default value is : 16

Default: 16
password string
pipPackages string[]
primaryKeyRequired boolean

Default value is : true

Default: true
queryTag string
role string
s3Acl string
s3Bucket string
s3EndpointUrl string
s3KeyPrefix string
s3RegionName string
schemaMapping string
stage string
stateName string

Default value is : singer-state

Default: "singer-state"
taskRunner
timeout string
format=duration
validateRecords boolean

Default value is : false

Default: false
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.singer.targets.SqlServer object

Full documentation can be found here

database string required
minLength=1
from string required
host string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
password string required
minLength=1
port integer required
type const: "io.kestra.plugin.singer.targets.SqlServer" required
Constant: "io.kestra.plugin.singer.targets.SqlServer"
username string required
minLength=1
allowFailure boolean

Default value is : false

Default: false
command string
containerImage string

Default value is : python:3.10.12

Default: "python:3.10.12"
defaultTargetSchema string
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
flatteningEnabled boolean
flatteningMaxDepth integer
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
pipPackages string[]
preferFloatOverNumeric boolean
stateName string

Default value is : singer-state

Default: "singer-state"
streamMapConfig string
streamMaps string
tablePrefix string
taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.soda.Scan object
Examples

Run a scan on BigQuery.

id: soda_scan
namespace: company.team

tasks:
  - id: scan
    type: io.kestra.plugin.soda.Scan
    configuration:
      data_source kestra:
        type: bigquery
        connection:
          project_id: kestra-unit-test
          dataset: demo
          account_info_json: |
            {{ secret('GCP_CREDS') }}
    checks:
      checks for orderDetail:
        - row_count > 0
        - max(unitPrice):
            warn: when between 1 and 250
            fail: when > 250
      checks for territory:
        - row_count > 0
        - failed rows:
            name: Failed rows query test
            fail condition: regionId = 4
    requirements:
      - soda-core-bigquery

checks object required
configuration object required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
requirements string[] required

Python dependencies list to set up in the virtualenv, in the same format as requirements.txt. It must at least provide the required Soda package.

type const: "io.kestra.plugin.soda.Scan" required
Constant: "io.kestra.plugin.soda.Scan"
allowFailure boolean

Default value is : false

Default: false
containerImage string

Default value is : sodadata/soda-core

Default: "sodadata/soda-core"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
dockerOptions
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use the `docker` property instead
env Record<string, string>
inputFiles Record<string, string>

You can define the files as map or a JSON string. Each file can be defined inlined or can reference a file from Kestra's internal storage.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
runner string

Deprecated, use 'taskRunner' instead

Values: "PROCESS" "DOCKER"
taskRunner
timeout string
format=duration
variables object
verbose boolean

Default value is : false

Default: false
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.solace.Consume object
Examples

Consume messages from a Solace queue.

id: consume_message_from_solace_queue
namespace: company.team

tasks:
  - id: consume_from_solace
    type: io.kestra.plugin.solace.Consume
    host: localhost:55555
    username: admin
    password: admin
    vpn: default
    messageDeserializer: JSON
    queueName: test_queue
    queueType: DURABLE_EXCLUSIVE

host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
queueName string required
queueType string required
Values: "DURABLE_EXCLUSIVE" "DURABLE_NON_EXCLUSIVE" "NON_DURABLE_EXCLUSIVE"
type const: "io.kestra.plugin.solace.Consume" required
Constant: "io.kestra.plugin.solace.Consume"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

Default value is : 10.000000000

Default: 10.0
format=duration
maxMessages integer

Default value is : 100

Default: 100
messageDeserializer string

Default value is : STRING

Default: "STRING"
Values: "STRING" "BINARY" "ION" "JSON"
messageDeserializerProperties object

Configs in key/value pairs.

Default value is : {}

Default:
{}
messageSelector string

Enables support for message selection based on message header parameter and message properties values.

password string
properties object

Default value is : {}

Default:
{}
timeout string
format=duration
username string
vpn string

Default value is : default

Default: "default"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.solace.Produce object
Examples

Publish a file as messages into a Solace Broker.

id: send_messages_to_solace_queue
namespace: company.team

inputs:
  - id: file
    type: FILE
    description: a CSV file with columns id, username, tweet, and timestamp

tasks:
  - id: read_csv_file
    type: io.kestra.plugin.serdes.csv.CsvToIon
    from: "{{ inputs.file }}"

  - id: transform_row_to_json
    type: io.kestra.plugin.scripts.nashorn.FileTransform
    from: "{{ outputs.read_csv_file.uri }}"
    script: |
      var result = {
        "payload": {
          "username": row.username,
          "tweet": row.tweet
        },
        "properties": {
            "correlationId": "42"
        }
      };
      row = result

  - id: send_message_to_solace
    type: io.kestra.plugin.solace.Produce
    from: "{{ outputs.transform_row_to_json.uri }}"
    topicDestination: test/tweets
    host: localhost:55555
    username: admin
    password: admin
    vpn: default
    messageSerializer: "JSON"

from string | array | object required

Can be an internal storage URI, a map (i.e. a list of key-value pairs) or a list of maps. The following keys are supported: payload, properties.

host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
topicDestination string required
type const: "io.kestra.plugin.solace.Produce" required
Constant: "io.kestra.plugin.solace.Produce"
allowFailure boolean

Default value is : false

Default: false
awaitAcknowledgementTimeout string

Default value is : 60.000000000

Default: 60.0
format=duration
deliveryMode string

Default value is : PERSISTENT

Default: "PERSISTENT"
Values: "DIRECT" "PERSISTENT"
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
messageProperties object

Additional properties must be provided with a Key of type String and a Value of type String. Each key can be customer-provided, or it can be one of the Solace message properties.

Default value is : {}

Default:
{}
messageSerializer string

Default value is : STRING

Default: "STRING"
Values: "STRING" "BINARY" "ION" "JSON"
messageSerializerProperties object

Configs in key/value pairs.

Default value is : {}

Default:
{}
password string
properties object

Default value is : {}

Default:
{}
timeout string
format=duration
username string
vpn string

Default value is : default

Default: "default"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.solace.Trigger object
Examples

Trigger flow based on messages received from a Solace broker.

id: trigger_from_solace_queue
namespace: company.team

tasks:
  - id: hello
    type: io.kestra.plugin.core.log.Log
    message: Hello there! I received {{ trigger.messagesCount }} from Solace!

triggers:
  - id: read_from_solace
    type: io.kestra.plugin.solace.Trigger
    interval: PT30S
    host: localhost:55555
    username: admin
    password: admin
    vpn: default
    messageDeserializer: JSON
    queueName: test_queue
    queueType: DURABLE_EXCLUSIVE

host string required
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
queueName string required
queueType string required
Values: "DURABLE_EXCLUSIVE" "DURABLE_NON_EXCLUSIVE" "NON_DURABLE_EXCLUSIVE"
type const: "io.kestra.plugin.solace.Trigger" required
Constant: "io.kestra.plugin.solace.Trigger"
conditions array
description string
disabled boolean

Default value is : false

Default: false
interval string

The interval between two successive polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval should be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDuration string

Default value is : 10.000000000

Default: 10.0
format=duration
maxMessages integer

Default value is : 100

Default: 100
messageDeserializer string

Default value is : STRING

Default: "STRING"
Values: "STRING" "BINARY" "ION" "JSON"
messageDeserializerProperties object

Configs in key/value pairs.

Default value is : {}

Default:
{}
messageSelector string

Enables support for message selection based on message header parameter and message properties values.

password string
properties object

Default value is : {}

Default:
{}
stopAfter string[]
username string
vpn string

Default value is : default

Default: "default"
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.spark.JarSubmit object
Examples
id: spark_jar_submit
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: jar_submit
    type: io.kestra.plugin.spark.JarSubmit
    runner: DOCKER
    master: spark://localhost:7077
    mainResource: "{{ inputs.file }}"
    mainClass: spark.samples.App

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
mainClass string required
mainResource string required

This should be the location of a JAR file for Scala/Java applications, or a Python script for PySpark applications. Must be an internal storage URI.

master string required

Spark master URL formats.

type const: "io.kestra.plugin.spark.JarSubmit" required
Constant: "io.kestra.plugin.spark.JarSubmit"
allowFailure boolean

Default value is : false

Default: false
appFiles Record<string, string>

Must be an internal storage URI.

args string[]
configurations Record<string, string>
containerImage string

Default value is : bitnami/spark

Default: "bitnami/spark"
deployMode string
Values: "CLIENT" "CLUSTER"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
env Record<string, string>
jars Record<string, string>

Must be an internal storage URI.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
name string
runner string

Deprecated - use 'taskRunner' instead.

Values: "PROCESS" "DOCKER"
sparkSubmitPath string

Default value is : spark-submit

Default: "spark-submit"
taskRunner
timeout string
format=duration
verbose boolean

Default value is : false

Default: false
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.spark.PythonSubmit object
Examples
id: spark_python_submit
namespace: company.team

tasks:
  - id: python_submit
    type: io.kestra.plugin.spark.PythonSubmit
    runner: DOCKER
    docker:
      networkMode: host
      user: root
    master: spark://localhost:7077
    args:
      - "10"
    mainScript: |
      import sys
      from random import random
      from operator import add
      from pyspark.sql import SparkSession


      if __name__ == "__main__":
          spark = SparkSession               .builder               .appName("PythonPi")               .getOrCreate()

          partitions = int(sys.argv[1]) if len(sys.argv) > 1 else 2
          n = 100000 * partitions

          def f(_: int) -> float:
              x = random() * 2 - 1
              y = random() * 2 - 1
              return 1 if x ** 2 + y ** 2 <= 1 else 0

          count = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)
          print("Pi is roughly %f" % (4.0 * count / n))

          spark.stop()

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
mainScript string required
master string required

Spark master URL formats.

type const: "io.kestra.plugin.spark.PythonSubmit" required
Constant: "io.kestra.plugin.spark.PythonSubmit"
allowFailure boolean

Default value is : false

Default: false
appFiles Record<string, string>

Must be an internal storage URI.

args string[]
configurations Record<string, string>
containerImage string

Default value is : bitnami/spark

Default: "bitnami/spark"
deployMode string
Values: "CLIENT" "CLUSTER"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
env Record<string, string>
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
name string
pythonFiles Record<string, string>

Must be an internal storage URI.

runner string

Deprecated - use 'taskRunner' instead.

Values: "PROCESS" "DOCKER"
sparkSubmitPath string

Default value is : spark-submit

Default: "spark-submit"
taskRunner
timeout string
format=duration
verbose boolean

Default value is : false

Default: false
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.spark.RSubmit object
Examples
id: spark_r_submit
namespace: company.team

tasks:
  - id: r_submit
    type: io.kestra.plugin.spark.RSubmit
    runner: DOCKER
    docker:
      networkMode: host
      user: root
    master: spark://localhost:7077
    mainScript: |
      library(SparkR, lib.loc = c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib")))
      sparkR.session()

      print("The SparkR session has initialized successfully.")

      sparkR.stop()

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
mainScript string required
master string required

Spark master URL formats.

type const: "io.kestra.plugin.spark.RSubmit" required
Constant: "io.kestra.plugin.spark.RSubmit"
allowFailure boolean

Default value is : false

Default: false
appFiles Record<string, string>

Must be an internal storage URI.

args string[]
configurations Record<string, string>
containerImage string

Default value is : bitnami/spark

Default: "bitnami/spark"
deployMode string
Values: "CLIENT" "CLUSTER"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
env Record<string, string>
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
name string
runner string

Deprecated - use 'taskRunner' instead.

Values: "PROCESS" "DOCKER"
sparkSubmitPath string

Default value is : spark-submit

Default: "spark-submit"
taskRunner
timeout string
format=duration
verbose boolean

Default value is : false

Default: false
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.spark.SparkCLI object
Examples

Submit a PySpark job to a master node.

id: spark_cli
namespace: company.team

tasks:
  - id: hello
    type: io.kestra.plugin.spark.SparkCLI
    inputFiles:
      pi.py: |
        import sys
        from random import random
        from operator import add
        from pyspark.sql import SparkSession

        if __name__ == "__main__":
            spark = SparkSession                 .builder                 .appName("PythonPi")                 .getOrCreate()

            partitions = int(sys.argv[1]) if len(sys.argv) > 1 else 2
            n = 100000 * partitions

            def f(_: int) -> float:
                x = random() * 2 - 1
                y = random() * 2 - 1
                return 1 if x ** 2 + y ** 2 <= 1 else 0

            count = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)
            print("Pi is roughly %f" % (4.0 * count / n))

            spark.stop()
    docker:
      image: bitnami/spark
      networkMode: host
    commands:
      - spark-submit --name Pi --master spark://localhost:7077 pi.py
commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.spark.SparkCLI" required
Constant: "io.kestra.plugin.spark.SparkCLI"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : bitnami/spark

Default: "bitnami/spark"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated - use the 'taskRunner' property instead.
env Record<string, string>
failFast boolean

If set to false, all commands will be executed one after the other. The final state of the task execution is determined by the last command. Note that this property may be ignored if a non-compatible interpreter is specified. You can also disable it if your interpreter does not support the `set -e` option.

Default value is : true

Default: true
inputFiles object | string
interpreter string[]

Default value is : `- /bin/sh

  • -c`

Default value is : `- /bin/sh

  • -c`
Default:
[
  "/bin/sh",
  "-c"
]
minItems=1
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputDirectory boolean

Required to use the {{ outputDir }} expression. Note that it could increase the starting time. Deprecated, use the outputFiles property instead.

Default value is : "false"

Default: "false"
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

runner string

Only used if the taskRunner property is not set

Values: "PROCESS" "DOCKER"
targetOS string

Default value is : AUTO

Default: "AUTO"
Values: "LINUX" "WINDOWS" "AUTO"
taskRunner
timeout string
format=duration
warningOnStdErr boolean

Default value is : true

Default: true
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.sqlmesh.cli.SQLMeshCLI object
Examples

Orchestrate a SQLMesh project by automatically applying the plan

id: sqlmesh_transform
namespace: company.team

tasks:
  - id: transform
    type: io.kestra.plugin.sqlmesh.cli.SQLMeshCLI
    beforeCommands:
      - sqlmesh init duckdb
    commands:
      - sqlmesh plan --auto-apply
commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.sqlmesh.cli.SQLMeshCLI" required
Constant: "io.kestra.plugin.sqlmesh.cli.SQLMeshCLI"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : ghcr.io/kestra-io/sqlmesh

Default: "ghcr.io/kestra-io/sqlmesh"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
env Record<string, string>
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.surrealdb.Query object
Examples

Send a SurrealQL query to a SurrealDB database.

id: surrealdb_query
namespace: company.team

tasks:
  - id: select
    type: io.kestra.plugin.surrealdb.Query
    useTls: true
    host: localhost
    port: 8000
    username: surreal_user
    password: surreal_passwd
    database: surreal_db
    namespace: surreal_namespace
    query: SELECT * FROM SURREAL_TABLE
    fetchType: STORE

database string required
minLength=1
host string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
namespace string required
minLength=1
query string required
minLength=1
type const: "io.kestra.plugin.surrealdb.Query" required
Constant: "io.kestra.plugin.surrealdb.Query"
allowFailure boolean

Default value is : false

Default: false
connection object
connectionTimeout integer

Default value is : 60

Default: 60
exclusiveMin=0
description string
disabled boolean

Default value is : false

Default: false
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : STORE

Default: "STORE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
parameters object

See the SurrealDB documentation about SurrealQL Prepared Statements for query syntax. This should be supplied as a parameter map using named parameters.

Default value is : {}

Default:
{}
password string
port integer

Default value is : 8000

Default: 8000
exclusiveMin=0
timeout string
format=duration
useTls boolean

Default value is : false

Default: false
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.surrealdb.Trigger object
Examples

Wait for SurrealQL query to return results, and then iterate through rows.

id: surrealdb_trigger
namespace: company.team

tasks:
  - id: each
    type: io.kestra.plugin.core.flow.EachSequential
    tasks:
      - id: return
        type: io.kestra.plugin.core.debug.Return
        format: "{{ json(taskrun.value) }}"
    value: "{{ trigger.rows }}"

triggers:
  - id: watch
    type: io.kestra.plugin.surrealdb.Trigger
    interval: "PT5M"
    host: localhost
    port: 8000
    username: surreal_user
    password: surreal_passwd
    namespace: surreal_namespace
    database: surreal_db
    fetchType: FETCH
    query: SELECT * FROM SURREAL_TABLE
database string required
minLength=1
host string required
minLength=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
namespace string required
minLength=1
query string required
minLength=1
type const: "io.kestra.plugin.surrealdb.Trigger" required
Constant: "io.kestra.plugin.surrealdb.Trigger"
conditions array
connectionTimeout integer

Default value is : 60

Default: 60
exclusiveMin=0
description string
disabled boolean

Default value is : false

Default: false
fetchType string

FETCH_ONE - output the first row. FETCH - output all rows as output variable. STORE - store all rows to a file. NONE - do nothing.

Default value is : STORE

Default: "STORE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
interval string

The interval between two successive polls of the schedule; this can avoid overloading the remote system with too many calls. For most triggers that depend on external systems, the minimal interval should be at least PT30S. See ISO 8601 Durations for more information on available interval values.

Default value is : 60.000000000

Default: 60.0
format=duration
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
parameters object

See the SurrealDB documentation about SurrealQL Prepared Statements for query syntax. This should be supplied as a parameter map using named parameters.

Default value is : {}

Default:
{}
password string
port integer

Default value is : 8000

Default: 8000
exclusiveMin=0
stopAfter string[]
useTls boolean

Default value is : false

Default: false
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.terraform.cli.TerraformCLI object
Examples

Initialize Terraform, then create and apply the Terraform plan

id: git_terraform
namespace: company.team

tasks:
  - id: git
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: clone_repository
        type: io.kestra.plugin.git.Clone
        url: https://github.com/anna-geller/kestra-ci-cd
        branch: main

      - id: terraform
        type: io.kestra.plugin.terraform.cli.TerraformCLI
        beforeCommands:
          - terraform init
        inputFiles:
          terraform.tfvars: |
            username            = "cicd"
            password            = "{{ secret('CI_CD_PASSWORD') }}"
            hostname            = "https://demo.kestra.io"
        outputFiles:
          - "*.txt"
        commands:
          - terraform plan 2>&1 | tee plan_output.txt
          - terraform apply -auto-approve 2>&1 | tee apply_output.txt
        env:
          AWS_ACCESS_KEY_ID: "{{ secret('AWS_ACCESS_KEY_ID') }}"
          AWS_SECRET_ACCESS_KEY: "{{ secret('AWS_SECRET_ACCESS_KEY') }}"
          AWS_DEFAULT_REGION: "{{ secret('AWS_DEFAULT_REGION') }}"

commands string[] required
minItems=1
id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.terraform.cli.TerraformCLI" required
Constant: "io.kestra.plugin.terraform.cli.TerraformCLI"
allowFailure boolean

Default value is : false

Default: false
beforeCommands string[]
containerImage string

Default value is : hashicorp/terraform

Default: "hashicorp/terraform"
description string
disabled boolean

Default value is : false

Default: false
docker
All of: io.kestra.plugin.scripts.exec.scripts.models.DockerOptions object, Deprecated, use 'taskRunner' instead
env Record<string, string>
inputFiles object | string
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namespaceFiles
All of: io.kestra.core.models.tasks.NamespaceFiles object, Inject namespace files.
outputFiles string[]

Must be a list of glob expressions relative to the current working directory, some examples: my-dir/**, my-dir/*/** or my-dir/my-file.txt.

taskRunner
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.tika.Parse object
Examples

Extract text from a file.

id: tika_parse
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: parse
    type: io.kestra.plugin.tika.Parse
    from: '{{ inputs.file }}'
    extractEmbedded: true
    store: false

Extract text from an image using OCR.

id: tika_parse
namespace: company.team

inputs:
  - id: file
    type: FILE

tasks:
  - id: parse
    type: io.kestra.plugin.tika.Parse
    from: '{{ inputs.file }}'
    ocrOptions:
      strategy: OCR_AND_TEXT_EXTRACTION
    store: true

id string required
minLength=1pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.tika.Parse" required
Constant: "io.kestra.plugin.tika.Parse"
allowFailure boolean

Default value is : false

Default: false
contentType string

Default value is : XHTML

Default: "XHTML"
Values: "TEXT" "XHTML" "XHTML_NO_HEADER"
description string
disabled boolean

Default value is : false

Default: false
extractEmbedded boolean

Default value is : false

Default: false
from string

Must be an internal storage URI.

logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
ocrOptions
All of: io.kestra.plugin.tika.Parse-OcrOptions object, Custom options for OCR processing.
store boolean

Default value is : true

Default: true
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.tika.Parse-OcrOptions object
enableImagePreprocessing boolean

Apache Tika will run preprocessing of images (rotation detection and image normalizing with ImageMagick) before sending the image to Tesseract if the user has included dependencies (listed below) and if the user opts to include these preprocessing steps.

language string
strategy string

You need to install Tesseract to enable OCR processing, along with Tesseract language pack.

Default value is : NO_OCR

Default: "NO_OCR"
Values: "AUTO" "NO_OCR" "OCR_ONLY" "OCR_AND_TEXT_EXTRACTION"
io.kestra.plugin.transform.grok.TransformItems object

The TransformItems task is similar to the famous Logstash Grok filter from the ELK stack. It is particularly useful for transforming unstructured data such as logs into a structured, indexable, and queryable data structure.

The TransformItems task ships with all the default patterns. You can find them here: https://github.com/kestra-io/plugin-transform/tree/main/plugin-transform-grok/src/main/resources/patterns.

Examples

Consume, parse, and structure logs events from Kafka topic.

id: grok_transform_items
namespace: company.team

tasks:
  - id: transform_items
    type: io.kestra.plugin.transform.grok.TransformItems
    pattern: "%{TIMESTAMP_ISO8601:logdate} %{LOGLEVEL:loglevel} %{GREEDYDATA:message}"
    from: "{{ trigger.uri }}"

triggers:
  - id: trigger
    type: io.kestra.plugin.kafka.Trigger
    topic: test_kestra
    properties:
      bootstrap.servers: localhost:9092
    serdeProperties:
      schema.registry.url: http://localhost:8085
      keyDeserializer: STRING
      valueDeserializer: STRING
    groupId: kafkaConsumerGroupId
    interval: PT30S
    maxRecords: 5

from string required

Must be a kestra:// internal storage URI.

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.transform.grok.TransformItems" required
Constant: "io.kestra.plugin.transform.grok.TransformItems"
allowFailure boolean

Default value is : false

Default: false
breakOnFirstMatch boolean

The first successful match by grok will result in the task being finished. Set to false if you want the task to try all configured patterns.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
keepEmptyCaptures boolean

When an optional field cannot be captured, the empty field is retained in the output. Set false if you want empty optional fields to be filtered out.

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namedCapturesOnly boolean

Default value is : true

Default: true
pattern string
patternDefinitions object

A map of pattern-name and pattern pairs defining custom patterns to be used by the current tasks. Patterns matching existing names will override the pre-existing definition.

patterns string[]
patternsDir string[]

Directories must be paths relative to the working directory.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.transform.grok.TransformValue object

The TransformValue task is similar to the famous Logstash Grok filter from the ELK stack. It is particularly useful for transforming unstructured data such as logs into a structured, indexable, and queryable data structure.

The TransformValue task ships with all the default patterns. You can find them here: https://github.com/kestra-io/plugin-transform/tree/main/plugin-transform-grok/src/main/resources/patterns.

Examples

Consume, parse, and structure logs events from Kafka topic.

id: grok_transform_value
namespace: company.team

tasks:
  - id: transform_value
    type: io.kestra.plugin.transform.grok.TransformValue
    pattern: "%{TIMESTAMP_ISO8601:logdate} %{LOGLEVEL:loglevel} %{GREEDYDATA:message}"
    from: "{{ trigger.value }}"

  - id: log_on_warn
    type: io.kestra.plugin.core.flow.If
    condition: "{{ grok.value['LOGLEVEL'] == 'ERROR' }}"
    then:
      - id: when_true
        type: io.kestra.plugin.core.log.Log
        message: "{{ outputs.transform_value.value }}"

triggers:
  - id: realtime_trigger
    type: io.kestra.plugin.kafka.RealtimeTrigger
    topic: test_kestra
    properties:
      bootstrap.servers: localhost:9092
    serdeProperties:
      schema.registry.url: http://localhost:8085
      keyDeserializer: STRING
      valueDeserializer: STRING
    groupId: kafkaConsumerGroupId

from string required
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.transform.grok.TransformValue" required
Constant: "io.kestra.plugin.transform.grok.TransformValue"
allowFailure boolean

Default value is : false

Default: false
breakOnFirstMatch boolean

The first successful match by grok will result in the task being finished. Set to false if you want the task to try all configured patterns.

Default value is : true

Default: true
description string
disabled boolean

Default value is : false

Default: false
keepEmptyCaptures boolean

When an optional field cannot be captured, the empty field is retained in the output. Set false if you want empty optional fields to be filtered out.

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
namedCapturesOnly boolean

Default value is : true

Default: true
pattern string
patternDefinitions object

A map of pattern-name and pattern pairs defining custom patterns to be used by the current tasks. Patterns matching existing names will override the pre-existing definition.

patterns string[]
patternsDir string[]

Directories must be paths relative to the working directory.

timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.transform.jsonata.TransformItems object

JSONata is a sophisticated query and transformation language for JSON data.

Examples

Transform JSON payload using JSONata expression.

id: jsonata_example
namespace: company.team

tasks:
  - id: http_download
    type: io.kestra.plugin.core.http.Download
    uri: https://dummyjson.com/products

  - id: get_product_and_brand_name
    description: "String Transformation"
    type: io.kestra.plugin.transform.jsonata.TransformItems
    from: "{{ outputs.http_download.uri }}"
    expression: products.(title & ' by ' & brand)

  - id: get_total_price
    description: "Number Transformation"
    type: io.kestra.plugin.transform.jsonata.TransformItems
    from: "{{ outputs.http_download.uri }}"
    expression: $sum(products.price)

  - id: get_discounted_price
    type: io.kestra.plugin.transform.jsonata.TransformItems
    from: "{{ outputs.http_download.uri }}"
    expression: $sum(products.(price-(price*discountPercentage/100)))

  - id: sum_up
    description: "Writing out results in the form of JSON"
    type: io.kestra.plugin.transform.jsonata.TransformItems
    from: "{{ outputs.http_download.uri }}"
    expression: |
      {
        "total_products": $count(products),
        "total_price": $sum(products.price),
        "total_discounted_price": $sum(products.(price-(price*discountPercentage/100)))
      }

expression string required
from string required

Must be a kestra:// internal storage URI.

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.transform.jsonata.TransformItems" required
Constant: "io.kestra.plugin.transform.jsonata.TransformItems"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
explodeArray boolean

If the JSONata expression results in a JSON array and this property is set to true, then a record will be written for each element. Otherwise, the JSON array is kept as a single record.

Default value is : true

Default: true
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDepth integer

Default value is : 1000

Default: 1000
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.transform.jsonata.TransformValue object

JSONata is a sophisticated query and transformation language for JSON data.

Examples

Transform JSON data using JSONata expression

id: jsonata_transform_value
namespace: company.team

tasks:
  - id: transform_json
    type: io.kestra.plugin.transform.jsonata.TransformValue
    from: |
      {
        "order_id": "ABC123",
        "first_name": "John",
        "last_name": "Doe",
        "address": {
          "city": "Paris",
          "country": "France"
        },
        "items": [
          {
            "product_id": "001",
            "name": "Apple",
            "quantity": 5,
            "price_per_unit": 0.5
          },
          {
            "product_id": "002",
            "name": "Banana",
            "quantity": 3,
            "price_per_unit": 0.3
          },
          {
            "product_id": "003",
            "name": "Orange",
            "quantity": 2,
            "price_per_unit": 0.4
          }
        ]
      }
    expression: |
      {
        "order_id": order_id,
        "customer_name": first_name & ' ' & last_name,
        "address": address.city & ', ' & address.country,
        "total_price": $sum(items.(quantity * price_per_unit))
      }

expression string required
from string required

Must be a valid JSON string.

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.transform.jsonata.TransformValue" required
Constant: "io.kestra.plugin.transform.jsonata.TransformValue"
allowFailure boolean

Default value is : false

Default: false
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
maxDepth integer

Default value is : 1000

Default: 1000
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.weaviate.BatchCreate object

Data can be either in an ION-serialized file format or as a list of key-value pairs. If the schema doesn't exist yet, it will be created automatically.

Examples

Send batch object creation request to a Weaviate database.

id: weaviate_batch_load
namespace: company.team

tasks:
  - id: batch_load
    type: io.kestra.plugin.weaviate.BatchCreate
    url: "https://demo-cluster-id.weaviate.network"
    apiKey: "{{ secret('WEAVIATE_API_KEY') }}"
    className: WeaviateDemo
    objects:
      - textField: "some text"
        numField: 24
      - textField: "another text"
        numField: 42

Send batch object creation request to a Weaviate database using an ION input file e.g. passed from output of another task.

id: weaviate_batch_insert
namespace: company.team

tasks:
  - id: extract
    type: io.kestra.plugin.core.http.Download
    uri: https://huggingface.co/datasets/kestra/datasets/raw/main/ion/ingest.ion

  - id: batch_insert
    type: io.kestra.plugin.weaviate.BatchCreate
    url: "https://demo-cluster-id.weaviate.network"
    apiKey: "{{ secret('WEAVIATE_API_KEY') }}"
    className: Titles
    objects: "{{ outputs.extract.uri }}"

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
objects string | object[] required

ION File URI or the list of objects to insert

type const: "io.kestra.plugin.weaviate.BatchCreate" required
Constant: "io.kestra.plugin.weaviate.BatchCreate"
url string required

Example: localhost:8080 or https://cluster-id.weaviate.network

minLength=1
allowFailure boolean

Default value is : false

Default: false
apiKey string

If not provided, the anonymous authentication scheme will be used.

className string
description string
disabled boolean

Default value is : false

Default: false
headers object

Default value is : {}

Default:
{}
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.weaviate.Delete object
Examples

Send delete request to a Weaviate database. Use object ID or other properties.

id: weaviate_delete_flow
namespace: company.team

tasks:
  - id: delete
    type: io.kestra.plugin.weaviate.Delete
    url: https://demo-cluster-id.weaviate.network
    className: WeaviateObject
    filter:
      fieldName: field value to be deleted by

className string required
minLength=1
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.weaviate.Delete" required
Constant: "io.kestra.plugin.weaviate.Delete"
url string required

Example: localhost:8080 or https://cluster-id.weaviate.network

minLength=1
allowFailure boolean

Default value is : false

Default: false
apiKey string

If not provided, the anonymous authentication scheme will be used.

description string
disabled boolean

Default value is : false

Default: false
filter object
headers object

Default value is : {}

Default:
{}
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.weaviate.Query object
Examples

Execute a GraphQL query to fetch data from a Weaviate database.

id: weaviate_query
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.weaviate.Query
    url: https://demo-cluster-id.weaviate.network
    apiKey: "{{ secret('WEAVIATE_API_KEY') }}"
    query: |
      {
        Get {
          Question(limit: 5) {
            question
            answer
            category
          }
        }
      }


Query data from a Weaviate database using Generative Search with OpenAI

id: weaviate_generative_search
namespace: company.team

tasks:
  - id: query
    type: io.kestra.plugin.weaviate.Query
    url: https://demo-cluster-id.weaviate.network
    apiKey: "{{ secret('WEAVIATE_API_KEY') }}"
    headers:
      X-OpenAI-Api-Key: "{{ secret('OPENAI_API_KEY') }}"
    query: |
      {
        Get {
          Question(limit: 5, nearText: {concepts: ["biology"]}) {
            question
            answer
            category
          }
        }
      }

id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.weaviate.Query" required
Constant: "io.kestra.plugin.weaviate.Query"
url string required

Example: localhost:8080 or https://cluster-id.weaviate.network

minLength=1
allowFailure boolean

Default value is : false

Default: false
apiKey string

If not provided, the anonymous authentication scheme will be used.

description string
disabled boolean

Default value is : false

Default: false
fetchType string

FETCH_ONE outputs only the first row; FETCH outputs all rows; STORE stores all rows in a file; NONE doesn't store any data. NONE is particularly useful when you execute DDL statements or run queries that insert data into another table, e.g. using SELECT ... INSERT INTO statements.

Default value is : STORE

Default: "STORE"
Values: "STORE" "FETCH" "FETCH_ONE" "NONE"
headers object

Default value is : {}

Default:
{}
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
query string
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.weaviate.SchemaCreate object
Examples

Send schema creation request to a Weaviate database.

id: create_weaviate_schema
namespace: company.team

tasks:
  - id: schema
    type: io.kestra.plugin.weaviate.SchemaCreate
    url: "https://demo-cluster-id.weaviate.network"
    apiKey: "{{ secret('WEAVIATE_API_KEY') }}"
    className: Movies
    fields:
      name:
        - string
      description:
        - string
      category:
        - string
className string required
minLength=1
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.weaviate.SchemaCreate" required
Constant: "io.kestra.plugin.weaviate.SchemaCreate"
url string required

Example: localhost:8080 or https://cluster-id.weaviate.network

minLength=1
allowFailure boolean

Default value is : false

Default: false
apiKey string

If not provided, the anonymous authentication scheme will be used.

description string
disabled boolean

Default value is : false

Default: false
fields object

Requires a specified field name and a list of data types that will be stored in this field.

headers object

Default value is : {}

Default:
{}
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
timeout string
format=duration
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
io.kestra.plugin.zendesk.tickets.Create object
Examples

Create Zendesk ticket using username and token.

id: zendesk_flow
namespace: company.team

tasks:
  - id: create_ticket
    type: io.kestra.plugin.zendesk.tickets.Create
    domain: mycompany.zendesk.com
    username: [email protected]
    token: zendesk_api_token
    subject: "Increased 5xx in Demo Service"
    description: |
      "The number of 5xx has increased beyond the threshold for Demo service."
    priority: NORMAL
    ticketType: INCIDENT
    assigneeId: 1
    tags:
      - bug
      - workflow

Create Zendesk ticket using OAuth token.

id: zendesk_flow
namespace: company.team

tasks:
  - id: create_ticket
    type: io.kestra.plugin.zendesk.tickets.Create
    domain: mycompany.zendesk.com
    oauthToken: zendesk_oauth_token
    subject: "Increased 5xx in Demo Service"
    description: |
      "The number of 5xx has increased beyond the threshold for Demo service."
    priority: NORMAL
    ticketType: INCIDENT
    assigneeId: 1
    tags:
      - bug
      - workflow

Create a ticket when a Kestra workflow in any namespace with company as prefix fails.

id: create_ticket_on_failure
namespace: company.team

tasks:
  - id: create_ticket
    type: io.kestra.plugin.zendesk.tickets.Create
    domain: mycompany.zendesk.com
    oauthToken: zendesk_oauth_token
    subject: Workflow failed
    description: |
      "{{ execution.id }} has failed on {{ taskrun.startDate }}.
      See the link below for more details."
    priority: NORMAL
    ticketType: INCIDENT
    assigneeId: 1
    tags:
      - bug
      - workflow
triggers:
  - id: on_failure
    type: io.kestra.plugin.core.trigger.Flow
    conditions:
      - type: io.kestra.plugin.core.condition.ExecutionStatusCondition
        in:
          - FAILED
          - WARNING
      - type: io.kestra.plugin.core.condition.ExecutionNamespaceCondition
        namespace: company
        comparison: PREFIX

domain string required
id string required
minLength=1 pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
type const: "io.kestra.plugin.zendesk.tickets.Create" required
Constant: "io.kestra.plugin.zendesk.tickets.Create"
allowFailure boolean

Default value is : false

Default: false
assigneeId integer
description string
disabled boolean

Default value is : false

Default: false
logLevel string
Values: "ERROR" "WARN" "INFO" "DEBUG" "TRACE"
logToFile boolean

Default value is : false

Default: false
oauthToken string
priority string

Available values:

  • URGENT
  • HIGH
  • NORMAL
  • LOW
Values: "URGENT" "HIGH" "NORMAL" "LOW"
subject string
tags string[]
ticketType string

Available values:

  • PROBLEM
  • INCIDENT
  • QUESTION
  • TASK
Values: "PROBLEM" "INCIDENT" "QUESTION" "TASK"
timeout string
format=duration
token string
username string
workerGroup object
1 nested properties
key string
pattern=^[a-zA-Z0-9][a-zA-Z0-9_-]*
java.nio.charset.Charset object